diff --git a/.ci/complement_package.gotpl b/.ci/complement_package.gotpl
index e1625fd31f..2dd5e5e0e6 100644
--- a/.ci/complement_package.gotpl
+++ b/.ci/complement_package.gotpl
@@ -27,10 +27,10 @@ which is under the Unlicense licence.
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
- {{- /* Failing tests are first */ -}}
+ {{- /* Passing tests are first */ -}}
{{- range . -}}
- {{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
- ::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
+ {{- if eq .Result "PASS" -}}
+ ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
@@ -47,7 +47,6 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}
-
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
@@ -68,11 +67,10 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}
-
- {{- /* Then passing tests are last */ -}}
+ {{- /* and failing tests are last */ -}}
{{- range . -}}
- {{- if eq .Result "PASS" -}}
- ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
+ {{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
+ ::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
new file mode 100755
index 0000000000..a33b39314f
--- /dev/null
+++ b/.ci/scripts/auditwheel_wrapper.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Wraps `auditwheel repair` to first check whether we're repairing a potentially
+# abi3-compatible wheel and, if so, rename the wheel before repairing it.
+
+import argparse
+import os
+import subprocess
+from typing import Optional
+from zipfile import ZipFile
+
+from packaging.tags import Tag
+from packaging.utils import parse_wheel_filename
+from packaging.version import Version
+
+
+def check_is_abi3_compatible(wheel_file: str) -> None:
+ """Check the contents of the built wheel for any `.so` files that are *not*
+ abi3 compatible.
+ """
+
+ with ZipFile(wheel_file, "r") as wheel:
+ for file in wheel.namelist():
+ if not file.endswith(".so"):
+ continue
+
+ if not file.endswith(".abi3.so"):
+ raise Exception(f"Found non-abi3 lib: {file}")
+
+
+def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
+ """Replaces the cpython wheel file with a ABI3 compatible wheel"""
+
+ if tag.abi == "abi3":
+ # Nothing to do.
+ return wheel_file
+
+ check_is_abi3_compatible(wheel_file)
+
+ abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
+
+ dirname = os.path.dirname(wheel_file)
+ new_wheel_file = os.path.join(
+ dirname,
+ f"{name}-{version}-{abi3_tag}.whl",
+ )
+
+ os.rename(wheel_file, new_wheel_file)
+
+ print("Renamed wheel to", new_wheel_file)
+
+ return new_wheel_file
+
+
+def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
+ """Entry point"""
+
+ # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
+ # normalizes the package name (i.e. it converts matrix_synapse ->
+ # matrix-synapse), which is not what we want.
+ _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
+ name = os.path.basename(wheel_file).split("-")[0]
+
+ if len(tags) != 1:
+ # We expect a wheel file with only a single tag
+ raise Exception(f"Unexpectedly found multiple tags: {tags}")
+
+ tag = next(iter(tags))
+
+ if build:
+ # We don't use build tags in Synapse
+ raise Exception(f"Unexpected build tag: {build}")
+
+ # If the wheel is for cpython then convert it into an abi3 wheel.
+ if tag.interpreter.startswith("cp"):
+ wheel_file = cpython(wheel_file, name, version, tag)
+
+ # Finally, repair the wheel.
+ if archs is not None:
+ # If we are given archs then we are on macOS and need to use the
+ # `delocate` tools rather than `auditwheel`.
+ subprocess.run(["delocate-listdeps", wheel_file], check=True)
+ subprocess.run(
+ ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
+ check=True,
+ )
+ else:
+ subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
+
+ parser.add_argument(
+ "--wheel-dir",
+ "-w",
+ metavar="WHEEL_DIR",
+ help="Directory to store delocated wheels",
+ required=True,
+ )
+
+ parser.add_argument(
+ "--require-archs",
+ metavar="archs",
+ default=None,
+ )
+
+ parser.add_argument(
+ "wheel_file",
+ metavar="WHEEL_FILE",
+ )
+
+ args = parser.parse_args()
+
+ wheel_file = args.wheel_file
+ wheel_dir = args.wheel_dir
+ archs = args.require_archs
+
+ main(wheel_file, wheel_dir, archs)
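
For illustration, a minimal sketch (not part of the patch) of the renaming step the wrapper performs, using a hypothetical wheel filename:

    from packaging.tags import Tag
    from packaging.utils import parse_wheel_filename

    # Hypothetical cpython wheel name, for illustration only.
    filename = "matrix_synapse-1.72.0-cp37-cp37m-manylinux_2_17_x86_64.whl"
    _, version, _, tags = parse_wheel_filename(filename)
    tag = next(iter(tags))  # Tag(interpreter="cp37", abi="cp37m", platform="manylinux_2_17_x86_64")
    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
    # parse_wheel_filename normalises "matrix_synapse" to "matrix-synapse", so the
    # wrapper takes the distribution name from the filename itself instead.
    name = filename.split("-")[0]
    print(f"{name}-{version}-{abi3_tag}.whl")
    # -> matrix_synapse-1.72.0-cp37-abi3-manylinux_2_17_x86_64.whl
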
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
new file mode 100755
index 0000000000..0cdc20e19c
--- /dev/null
+++ b/.ci/scripts/calculate_jobs.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Calculate the trial jobs to run based on whether we're in a PR or not.
+
+import json
+import os
+
+
+def set_output(key: str, value: str):
+ # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
+ with open(os.environ["GITHUB_OUTPUT"], "at") as f:
+ print(f"{key}={value}", file=f)
+
+
+IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
+
+# First calculate the various trial jobs.
+#
+# For each type of test, we only run on Python 3.7 on PRs.
+
+trial_sqlite_tests = [
+ {
+ "python-version": "3.7",
+ "database": "sqlite",
+ "extras": "all",
+ }
+]
+
+if not IS_PR:
+ trial_sqlite_tests.extend(
+ {
+ "python-version": version,
+ "database": "sqlite",
+ "extras": "all",
+ }
+ for version in ("3.8", "3.9", "3.10", "3.11")
+ )
+
+
+trial_postgres_tests = [
+ {
+ "python-version": "3.7",
+ "database": "postgres",
+ "postgres-version": "11",
+ "extras": "all",
+ }
+]
+
+if not IS_PR:
+ trial_postgres_tests.append(
+ {
+ "python-version": "3.11",
+ "database": "postgres",
+ "postgres-version": "15",
+ "extras": "all",
+ }
+ )
+
+trial_no_extra_tests = [
+ {
+ "python-version": "3.7",
+ "database": "sqlite",
+ "extras": "",
+ }
+]
+
+print("::group::Calculated trial jobs")
+print(
+ json.dumps(
+ trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
+ )
+)
+print("::endgroup::")
+
+test_matrix = json.dumps(
+ trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
+)
+set_output("trial_test_matrix", test_matrix)
+
+
+# Next, calculate the various sytest jobs.
+#
+# For each type of test, we only run on focal on PRs.
+
+
+sytest_tests = [
+ {
+ "sytest-tag": "focal",
+ },
+ {
+ "sytest-tag": "focal",
+ "postgres": "postgres",
+ },
+ {
+ "sytest-tag": "focal",
+ "postgres": "multi-postgres",
+ "workers": "workers",
+ },
+]
+
+if not IS_PR:
+ sytest_tests.extend(
+ [
+ {
+ "sytest-tag": "testing",
+ "postgres": "postgres",
+ },
+ {
+ "sytest-tag": "buster",
+ "postgres": "multi-postgres",
+ "workers": "workers",
+ },
+ ]
+ )
+
+
+print("::group::Calculated sytest jobs")
+print(json.dumps(sytest_tests, indent=4))
+print("::endgroup::")
+
+test_matrix = json.dumps(sytest_tests)
+set_output("sytest_test_matrix", test_matrix)
diff --git a/.ci/scripts/gotestfmt b/.ci/scripts/gotestfmt
new file mode 100755
index 0000000000..83e0ec6361
--- /dev/null
+++ b/.ci/scripts/gotestfmt
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# wraps `gotestfmt`, hiding output from successful packages unless
+# all tests passed.
+
+set -o pipefail
+set -e
+
+# tee the test results to a log, whilst also piping them into gotestfmt,
+# telling it to hide successful results, so that we can clearly see
+# unsuccessful results.
+tee complement.log | gotestfmt -hide successful-packages
+
+# gotestfmt will exit non-zero if there were any failures, so if we got to this
+# point, we must have had a successful result.
+echo "All tests successful; showing all test results"
+
+# Pipe the test results back through gotestfmt, showing all results.
+# The log file consists of JSON lines giving the test results, interspersed
+# with regular stdout lines (including reports of downloaded packages).
+grep '^{"Time":' complement.log | gotestfmt
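
The final grep relies on `go test -json` emitting one JSON object per line, each beginning with a "Time" field; a rough Python equivalent of that filter (an illustrative sketch, not part of the script) is:

    # Keep only the JSON test-event lines, dropping the interleaved plain stdout
    # (such as reports of downloaded packages).
    with open("complement.log") as log:
        events = [line for line in log if line.startswith('{"Time":')]
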
diff --git a/.ci/scripts/postgres_exec.py b/.ci/scripts/postgres_exec.py
deleted file mode 100755
index 0f39a336d5..0000000000
--- a/.ci/scripts/postgres_exec.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import psycopg2
-
-# a very simple replacment for `psql`, to make up for the lack of the postgres client
-# libraries in the synapse docker image.
-
-# We use "postgres" as a database because it's bound to exist and the "synapse" one
-# doesn't exist yet.
-db_conn = psycopg2.connect(
- user="postgres", host="localhost", password="postgres", dbname="postgres"
-)
-db_conn.autocommit = True
-cur = db_conn.cursor()
-for c in sys.argv[1:]:
- cur.execute(c)
diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/prepare_old_deps.sh
index 478c8d639a..7e4f060b17 100755
--- a/.ci/scripts/test_old_deps.sh
+++ b/.ci/scripts/prepare_old_deps.sh
@@ -5,18 +5,8 @@
# - creates a venv with these old versions using poetry; and finally
# - invokes `trial` to run the tests with old deps.
-# Prevent tzdata from asking for user input
-export DEBIAN_FRONTEND=noninteractive
-
set -ex
-apt-get update
-apt-get install -y \
- python3 python3-dev python3-pip python3-venv pipx \
- libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
-
-export LANG="C.UTF-8"
-
# Prevent virtualenv from auto-updating pip to an incompatible version
export VIRTUALENV_NO_DOWNLOAD=1
@@ -33,12 +23,6 @@ export VIRTUALENV_NO_DOWNLOAD=1
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
-# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
-# We don't use these yet, but IIRC they are the default bound used when you `poetry add`.
-# The sed expression 's/\^/==/g' ought to do the trick. But it would also change
-# `python = "^3.7"` to `python = "==3.7", which would mean we fail because olddeps
-# runs on 3.8 (#12343).
-
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
@@ -55,7 +39,7 @@ sed -i \
# toml file. This means we don't have to ensure compatibility between old deps and
# dev tools.
-pip install --user toml
+pip install toml wheel
REMOVE_DEV_DEPENDENCIES="
import toml
@@ -69,8 +53,8 @@ with open('pyproject.toml', 'w') as f:
"
python3 -c "$REMOVE_DEV_DEPENDENCIES"
-pipx install poetry==1.1.14
-~/.local/bin/poetry lock
+pip install poetry==1.2.0
+poetry lock
echo "::group::Patched pyproject.toml"
cat pyproject.toml
@@ -78,6 +62,3 @@ echo "::endgroup::"
echo "::group::Lockfile after patch"
cat poetry.lock
echo "::endgroup::"
-
-~/.local/bin/poetry install -E "all test"
-~/.local/bin/poetry run trial --jobs=2 tests
diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh
index 4848901cbf..42ef654167 100755
--- a/.ci/scripts/setup_complement_prerequisites.sh
+++ b/.ci/scripts/setup_complement_prerequisites.sh
@@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
- go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+ go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh
index 033fd3e24e..9f6c49acff 100755
--- a/.ci/scripts/test_export_data_command.sh
+++ b/.ci/scripts/test_export_data_command.sh
@@ -32,7 +32,7 @@ else
fi
# Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+psql -c "CREATE DATABASE synapse"
# Port the SQLite databse to postgres so we can check command works against postgres
echo "+++ Port SQLite3 databse to postgres"
diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh
index b07a6b5d08..8cc41d3dca 100755
--- a/.ci/scripts/test_synapse_port_db.sh
+++ b/.ci/scripts/test_synapse_port_db.sh
@@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
-# - runs the port script on a prepopulated test sqlite db
-# - also runs it against an new sqlite db
+# - runs the port script on a prepopulated test sqlite db. Checks that the
+# return code is zero.
+# - reruns the port script on the same sqlite db, targeting the same postgres db.
+# Checks that the return code is zero.
+# - runs the port script against a new sqlite db. Checks the return code is zero.
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
-set -xe
+set -xe -o pipefail
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
-
-# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
-
-# Make sure the SQLite3 database is using the latest schema and has no pending background update.
+# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+psql -c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@@ -45,9 +45,23 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
-poetry run .ci/scripts/postgres_exec.py \
- "DROP DATABASE synapse" \
- "CREATE DATABASE synapse"
+psql \
+ -c "DROP DATABASE synapse" \
+ -c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+
+echo "--- Create a brand new postgres database from schema"
+cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
+sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
+psql -c "CREATE DATABASE synapse_unported"
+poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
+
+echo "+++ Comparing ported schema with unported schema"
+# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
+psql synapse -c "DROP TABLE port_from_sqlite3;"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
+# By default, `diff` returns zero if there are no changes and nonzero otherwise
+diff -u unported.sql ported.sql | tee schema_diff
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
index 7809863ef3..0b51345cbd 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -4,8 +4,15 @@
# things to include
!docker
!synapse
+!rust
!README.rst
!pyproject.toml
!poetry.lock
+!Cargo.lock
+!Cargo.toml
+!build_rust.py
+
+rust/target
+synapse/*.so
**/__pycache__
diff --git a/.editorconfig b/.editorconfig
index d629bede5e..bf9021ff82 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -4,7 +4,7 @@
root = true
# 4 space indentation
-[*.py]
+[*.{py,pyi}]
indent_style = space
indent_size = 4
max_line_length = 88
diff --git a/.flake8 b/.flake8
index acb118c86e..4c6a4d5843 100644
--- a/.flake8
+++ b/.flake8
@@ -8,4 +8,11 @@
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
-ignore=W503,W504,E203,E731,E501
+#
+# flake8-bugbear runs extra checks. Its error codes are described at
+# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
+# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
+# B023: Functions defined inside a loop must not use variables redefined in the loop
+# B024: Abstract base class with no abstract method.
+
+ignore=W503,W504,E203,E731,E501,B019,B023,B024
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
index 1b304198bc..abe0f656a2 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -74,6 +74,36 @@ body:
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
+ - I don't know
+ validations:
+ required: true
+ - type: input
+ id: database
+ attributes:
+ label: Database
+ description: |
+ Are you using SQLite or PostgreSQL? What's the version of your database?
+
+ If PostgreSQL, please also answer the following:
+ - are you using a single PostgreSQL server
+ or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
+ - have you previously ported from SQLite using the Synapse "portdb" script?
+ - have you previously restored from a backup?
+ validations:
+ required: true
+ - type: dropdown
+ id: workers
+ attributes:
+ label: Workers
+ description: |
+ Are you running a single Synapse process, or are you running
+ [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
+ options:
+ - Single process
+ - Multiple workers
+ - I don't know
+ validations:
+ required: true
- type: textarea
id: platform
attributes:
@@ -84,16 +114,27 @@ body:
validations:
required: true
- type: textarea
+ id: config
+ attributes:
+ label: Configuration
+ description: |
+ Do you have any unusual config options turned on? If so, please provide details.
+
+ - Experimental or undocumented features
+ - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
+ - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
+ - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
+ - type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
- This will be automatically formatted into code, so there is no need for backticks.
+ This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.
- **Bug reports are usually very difficult to diagnose without logging.**
+ **Bug reports are usually impossible to diagnose without logging.**
render: shell
validations:
required: true
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..7ce353ed64
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,23 @@
+version: 2
+updates:
+ - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+ package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "docker"
+ directory: "/docker"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
+ - package-ecosystem: "cargo"
+ directory: "/"
+ versioning-strategy: "lockfile-only"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml
new file mode 100644
index 0000000000..b6a29a5722
--- /dev/null
+++ b/.github/workflows/dependabot_changelog.yml
@@ -0,0 +1,46 @@
+name: Write changelog for dependabot PR
+on:
+ pull_request:
+ types:
+ - opened
+ - reopened # For debugging!
+
+permissions:
+ # Needed to be able to push the commit. See
+ # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
+ # for a similar example
+ contents: write
+
+jobs:
+ add-changelog:
+ runs-on: 'ubuntu-latest'
+ if: ${{ github.actor == 'dependabot[bot]' }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.ref }}
+ - name: Write, commit and push changelog
+ run: |
+ echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
+ git add changelog.d
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+ git config user.name "GitHub Actions"
+ git commit -m "Changelog"
+ git push
+ shell: bash
+ # The `git push` above does not trigger CI on the dependabot PR.
+ #
+ # By default, workflows can't trigger other workflows when they're just using the
+ # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
+ # recursive workflow loops by accident, because that'll get very expensive very
+ # quickly.) Instead, you have to manually call out to another workflow, or else
+ # make your changes (i.e. the `git push` above) using a personal access token.
+ # See
+ # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+ #
+ # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
+ # See git commit history for previous attempts. If anyone desperately wants to try
+ # again in the future, make a matrix-bot account and use its access token to git push.
+
+ # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
+ # are sufficiently locked down to dependabot only as above.
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index d20d30c035..49427ab50d 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -17,19 +17,19 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
- name: Inspect builder
run: docker buildx inspect
-
+
- name: Log in to DockerHub
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,10 +48,15 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
- uses: docker/build-push-action@v2
+ uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
+
+ # arm64 builds OOM without the git fetch setting. c.f.
+ # https://github.com/rust-lang/cargo/issues/10583
+ build-args: |
+ CARGO_NET_GIT_FETCH_WITH_CLI=true
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
new file mode 100644
index 0000000000..231982f681
--- /dev/null
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -0,0 +1,34 @@
+name: Deploy documentation PR preview
+
+on:
+ workflow_run:
+ workflows: [ "Prepare documentation PR preview" ]
+ types:
+ - completed
+
+jobs:
+ netlify:
+ if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
+ runs-on: ubuntu-latest
+ steps:
+ # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
+ # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
+ - name: 📥 Download artifact
+ uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
+ with:
+ workflow: docs-pr.yaml
+ run_id: ${{ github.event.workflow_run.id }}
+ name: book
+ path: book
+
+ - name: 📤 Deploy to Netlify
+ uses: matrix-org/netlify-pr-preview@v1
+ with:
+ path: book
+ owner: ${{ github.event.workflow_run.head_repository.owner.login }}
+ branch: ${{ github.event.workflow_run.head_branch }}
+ revision: ${{ github.event.workflow_run.head_sha }}
+ token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
+ site_id: ${{ secrets.NETLIFY_SITE_ID }}
+ desc: Documentation preview
+ deployment_env: PR Documentation Preview
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
new file mode 100644
index 0000000000..cde6cf511e
--- /dev/null
+++ b/.github/workflows/docs-pr.yaml
@@ -0,0 +1,34 @@
+name: Prepare documentation PR preview
+
+on:
+ pull_request:
+ paths:
+ - docs/**
+
+jobs:
+ pages:
+ name: GitHub Pages
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup mdbook
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ with:
+ mdbook-version: '0.4.17'
+
+ - name: Build the documentation
+ # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+ # However, we're using docs/README.md for other purposes and need to pick a new page
+ # as the default. Let's opt for the welcome page instead.
+ run: |
+ mdbook build
+ cp book/welcome_and_overview.html book/index.html
+
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: book
+ path: book
+ # We'll only use this in a workflow_run, then we're done with it
+ retention-days: 1
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index b366eb8667..575412d965 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -17,10 +17,10 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
@@ -54,11 +54,11 @@ jobs:
esac
# finally, set the 'branch-version' var.
- echo "::set-output name=branch-version::$branch"
+ echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+ uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index f263cf612d..a7097d5eae 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -5,7 +5,7 @@
#
# As an overview this workflow:
# - checks out develop,
-# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
+# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
@@ -25,13 +25,19 @@ jobs:
mypy:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
- poetry-version: "1.2.0b1"
+ poetry-version: "1.2.0"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -52,7 +58,14 @@ jobs:
postgres-version: "14"
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
@@ -61,7 +74,7 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v4
with:
python-version: "3.x"
- run: pip install .[all,test]
@@ -69,6 +82,12 @@ jobs:
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
+
+ # We nuke the local copy, as we've installed synapse into the virtualenv
+ # (rather than using an editable install, which we no longer support). If we
+ # don't do this then Python can't find the native lib.
+ - run: rm -rf synapse/
+
- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -112,7 +131,14 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
@@ -126,7 +152,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -153,8 +179,8 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
@@ -163,7 +189,7 @@ jobs:
- run: |
set -o pipefail
- TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+ TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
@@ -172,19 +198,19 @@ jobs:
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
- # TODO: should mypy be included here? It feels more brittle than the other two.
+ # TODO: should mypy be included here? It feels more brittle than the others.
- mypy
- trial
- sytest
+ - complement
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md
-
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index ed4fc6179d..0601a7dbaf 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -11,11 +11,12 @@ on:
# we do the full build on tags.
tags: ["v*"]
+ workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
-
+
permissions:
contents: write
@@ -24,8 +25,10 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -33,7 +36,7 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
- echo "::set-output name=distros::$dists"
+ echo "distros=$dists" >> "$GITHUB_OUTPUT"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
@@ -49,18 +52,18 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
install: true
- name: Set up docker layer caching
- uses: actions/cache@v2
+ uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -68,7 +71,9 @@ jobs:
${{ runner.os }}-buildx-
- name: Set up python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
@@ -84,14 +89,96 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: debs
path: debs/*
+ build-wheels:
+ name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-20.04, macos-11]
+ arch: [x86_64, aarch64]
+ # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
+ # It is not read by the rest of the workflow.
+ is_pr:
+ - ${{ startsWith(github.ref, 'refs/pull/') }}
+
+ exclude:
+ # Don't build macos wheels on PR CI.
+ - is_pr: true
+ os: "macos-11"
+ # Don't build aarch64 wheels on mac.
+ - os: "macos-11"
+ arch: aarch64
+ # Don't build aarch64 wheels on PR CI.
+ - is_pr: true
+ arch: aarch64
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - uses: actions/setup-python@v4
+ with:
+ # setup-python@v4 doesn't impose a default python version. Need to use 3.x
+ # here, because `python` on osx points to Python 2.7.
+ python-version: "3.x"
+
+ - name: Install cibuildwheel
+ run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
+
+ - name: Set up QEMU to emulate aarch64
+ if: matrix.arch == 'aarch64'
+ uses: docker/setup-qemu-action@v2
+ with:
+ platforms: arm64
+
+ - name: Build aarch64 wheels
+ if: matrix.arch == 'aarch64'
+ run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
+
+ - name: Only build a single wheel on PR
+ if: startsWith(github.ref, 'refs/pull/')
+ run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+
+ - name: Build wheels
+ run: python -m cibuildwheel --output-dir wheelhouse
+ env:
+ # Skip testing on platforms for which various libraries don't have wheels,
+ # and which therefore need extra build deps.
+ CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
+ # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
+ CARGO_NET_GIT_FETCH_WITH_CLI: true
+ CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
+
+ - uses: actions/upload-artifact@v3
+ with:
+ name: Wheel
+ path: ./wheelhouse/*.whl
+
build-sdist:
- name: "Build pypi distribution files"
- uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"
+ name: Build sdist
+ runs-on: ubuntu-latest
+ if: ${{ !startsWith(github.ref, 'refs/pull/') }}
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - run: pip install build
+
+ - name: Build sdist
+ run: python -m build --sdist
+
+ - uses: actions/upload-artifact@v3
+ with:
+ name: Sdist
+ path: dist/*.tar.gz
+
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -99,11 +186,12 @@ jobs:
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
+ - build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v3
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 4bc29c8207..b687eb002d 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,26 +4,51 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
+ workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
+ # Job to detect what has changed so we don't run e.g. Rust checks on PRs that
+ # don't modify Rust code.
+ changes:
+ runs-on: ubuntu-latest
+ outputs:
+ rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
+ steps:
+ - uses: dorny/paths-filter@v2
+ id: filter
+ # We only check on PRs
+ if: startsWith(github.ref, 'refs/pull/')
+ with:
+ filters: |
+ rust:
+ - 'rust/**'
+ - 'Cargo.toml'
+ - 'Cargo.lock'
+
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
- - run: pip install .
- - run: scripts-dev/generate_sample_config.sh --check
- - run: scripts-dev/config-lint.sh
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+ - uses: matrix-org/setup-python-poetry@v1
+ with:
+ extras: "all"
+ - run: poetry run scripts-dev/generate_sample_config.sh --check
+ - run: poetry run scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
@@ -35,79 +60,147 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
- if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
+ if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- - uses: actions/setup-python@v2
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
+ lint-pydantic:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ - uses: matrix-org/setup-python-poetry@v1
+ with:
+ extras: "all"
+ - run: poetry run scripts-dev/check_pydantic_models.py
+
+ lint-clippy:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: ${{ needs.changes.outputs.rust == 'true' }}
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ components: clippy
+ - uses: Swatinem/rust-cache@v2
+
+ - run: cargo clippy
+
+ lint-rustfmt:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: ${{ needs.changes.outputs.rust == 'true' }}
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ components: rustfmt
+ - uses: Swatinem/rust-cache@v2
+
+ - run: cargo fmt --check
+
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
- needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta]
+ needs:
+ - lint
+ - lint-crlf
+ - lint-newsfile
+ - lint-pydantic
+ - check-sampleconfig
+ - check-schema-delta
+ - lint-clippy
+ - lint-rustfmt
runs-on: ubuntu-latest
steps:
- run: "true"
- trial:
+ calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+ - id: get-matrix
+ run: .ci/scripts/calculate_jobs.py
+ outputs:
+ trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
+ sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
+
+ trial:
+ if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
+ needs: calculate-test-jobs
+ runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.7", "3.8", "3.9", "3.10"]
- database: ["sqlite"]
- extras: ["all"]
- include:
- # Newest Python without optional deps
- - python-version: "3.10"
- extras: ""
-
- # Oldest Python with PostgreSQL
- - python-version: "3.7"
- database: "postgres"
- postgres-version: "10"
- extras: "all"
-
- # Newest Python with newest PostgreSQL
- - python-version: "3.10"
- database: "postgres"
- postgres-version: "14"
- extras: "all"
+ job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- - name: Set up PostgreSQL ${{ matrix.postgres-version }}
- if: ${{ matrix.postgres-version }}
+ - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
+ if: ${{ matrix.job.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
- postgres:${{ matrix.postgres-version }}
+ postgres:${{ matrix.job.postgres-version }}
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
- python-version: ${{ matrix.python-version }}
- extras: ${{ matrix.extras }}
+ python-version: ${{ matrix.job.python-version }}
+ extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
- if: ${{ matrix.postgres-version }}
+ if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
- SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
+ SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
@@ -128,16 +221,56 @@ jobs:
# Note: sqlite only; no postgres
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
- - name: Test with old deps
- uses: docker://ubuntu:focal # For old python and sqlite
- # Note: focal seems to be using 3.8, but the oldest is 3.7?
- # See https://github.com/matrix-org/synapse/issues/12343
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
+ # There aren't wheels for some of the older deps, so we need to install
+ # their build dependencies
+ - run: |
+ sudo apt-get -qq install build-essential libffi-dev python-dev \
+ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.7'
+
+ # Calculating the old-deps actually takes a bunch of time, so we cache the
+ # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
+ # otherwise the `poetry install` step will error due to the poetry.lock
+ # file being outdated.
+ #
+ # This caches the output of `Prepare old deps`, which should generate the
+ # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
+ - uses: actions/cache@v3
+ id: cache-poetry-old-deps
+ name: Cache poetry.lock
+ with:
+ path: |
+ poetry.lock
+ pyproject.toml
+ key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
+ - name: Prepare old deps
+ if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
+ run: .ci/scripts/prepare_old_deps.sh
+
+ # We only install poetry now so that `setup-python-poetry` caches the
+ # dependencies from the right poetry.lock.
+ - uses: matrix-org/setup-python-poetry@v1
with:
- workdir: /github/workspace
- entrypoint: .ci/scripts/test_old_deps.sh
+ python-version: '3.7'
+ extras: "all test"
+
+ - run: poetry run trial -j2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -163,7 +296,7 @@ jobs:
extras: ["all"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
@@ -186,50 +319,39 @@ jobs:
sytest:
if: ${{ !failure() && !cancelled() }}
- needs: linting-done
+ needs: calculate-test-jobs
runs-on: ubuntu-latest
container:
- image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
+ image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
SYTEST_BRANCH: ${{ github.head_ref }}
- POSTGRES: ${{ matrix.postgres && 1}}
- MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
- WORKERS: ${{ matrix.workers && 1 }}
- REDIS: ${{ matrix.redis && 1 }}
- BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+ POSTGRES: ${{ matrix.job.postgres && 1}}
+ MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
+ WORKERS: ${{ matrix.job.workers && 1 }}
+ BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
- include:
- - sytest-tag: focal
-
- - sytest-tag: focal
- postgres: postgres
-
- - sytest-tag: testing
- postgres: postgres
-
- - sytest-tag: focal
- postgres: multi-postgres
- workers: workers
-
- - sytest-tag: buster
- postgres: multi-postgres
- workers: workers
-
- - sytest-tag: buster
- postgres: postgres
- workers: workers
- redis: redis
+ job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
@@ -237,10 +359,10 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
- name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+ name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
@@ -267,28 +389,31 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@v2
- - run: sudo apt-get -qq install xmlsec1
+ - uses: actions/checkout@v3
+ - run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
- python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
+ env:
+ PGHOST: localhost
+ PGUSER: postgres
+ PGPASSWORD: postgres
+ PGDATABASE: postgres
+
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
- env:
- TOP: ${{ github.workspace }}
strategy:
matrix:
include:
- python-version: "3.7"
- postgres-version: "10"
+ postgres-version: "11"
- - python-version: "3.10"
- postgres-version: "14"
+ - python-version: "3.11"
+ postgres-version: "15"
services:
postgres:
@@ -305,13 +430,37 @@ jobs:
--health-retries 5
steps:
- - uses: actions/checkout@v2
- - run: sudo apt-get -qq install xmlsec1
+ - uses: actions/checkout@v3
+ - name: Add PostgreSQL apt repository
+ # We need a version of pg_dump that can handle the version of
+ # PostgreSQL being tested against. The Ubuntu package repository lags
+ # behind new releases, so we have to use the PostgreSQL apt repository.
+ # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+ run: |
+ sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+ wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+ sudo apt-get update
+ - run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
+ id: run_tester_script
+ env:
+ PGHOST: localhost
+ PGUSER: postgres
+ PGPASSWORD: postgres
+ PGDATABASE: postgres
+ - name: "Upload schema differences"
+ uses: actions/upload-artifact@v3
+ if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
+ with:
+ name: Schema dumps
+ path: |
+ unported.sql
+ ported.sql
+ schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"
@@ -332,34 +481,61 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
- run: |
set -o pipefail
- POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+ POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
+ cargo-test:
+ if: ${{ needs.changes.outputs.rust == 'true' }}
+ runs-on: ubuntu-latest
+ needs:
+ - linting-done
+ - changes
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
+ - run: cargo test
+
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
needs:
- - check-sampleconfig
- - lint
- - lint-crlf
- - lint-newsfile
- trial
- trial-olddeps
- sytest
- export-data
- portdb
- complement
+ - cargo-test
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -367,5 +543,7 @@ jobs:
needs: ${{ toJSON(needs) }}
# The newsfile lint may be skipped on non PR builds
- skippable:
+ # Cargo test is skipped if there are no changes to Rust code
+ skippable: |
lint-newsfile
+ cargo-test
diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml
new file mode 100644
index 0000000000..0f0397cf5b
--- /dev/null
+++ b/.github/workflows/triage-incoming.yml
@@ -0,0 +1,15 @@
+name: Move new issues into the issue triage board
+
+on:
+ issues:
+ types: [ opened ]
+
+jobs:
+ triage:
+ uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
+ with:
+ project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
+ content_id: ${{ github.event.issue.node_id }}
+ secrets:
+ github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+
diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
new file mode 100644
index 0000000000..d1ac4357b1
--- /dev/null
+++ b/.github/workflows/triage_labelled.yml
@@ -0,0 +1,44 @@
+name: Move labelled issues to correct projects
+
+on:
+ issues:
+ types: [ labeled ]
+
+jobs:
+ move_needs_info:
+ name: Move X-Needs-Info on the triage board
+ runs-on: ubuntu-latest
+ if: >
+ contains(github.event.issue.labels.*.name, 'X-Needs-Info')
+ steps:
+ - uses: actions/add-to-project@main
+ id: add_project
+ with:
+ project-url: "https://github.com/orgs/matrix-org/projects/67"
+ github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ - name: Set status
+ env:
+ GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ run: |
+ gh api graphql -f query='
+ mutation(
+ $project: ID!
+ $item: ID!
+ $fieldid: ID!
+ $columnid: String!
+ ) {
+ updateProjectV2ItemFieldValue(
+ input: {
+ projectId: $project
+ itemId: $item
+ fieldId: $fieldid
+ value: {
+ singleSelectOptionId: $columnid
+ }
+ }
+ ) {
+ projectV2Item {
+ id
+ }
+ }
+ }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index dd8e6fbb1c..bbbe52d697 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -15,7 +15,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -32,8 +39,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -65,7 +79,14 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: stable
+ - uses: Swatinem/rust-cache@v2
+
- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
@@ -88,7 +109,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -114,8 +135,8 @@ jobs:
database: Postgres
steps:
- - name: Run actions/checkout@v2 for synapse
- uses: actions/checkout@v2
+ - name: Run actions/checkout@v3 for synapse
+ uses: actions/checkout@v3
with:
path: synapse
@@ -127,17 +148,16 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
- pipx install poetry==1.1.14
+ pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
- # NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
set -o pipefail
- TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+ TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests
@@ -153,7 +173,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index e58affb241..15fbfdddf1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,8 +15,9 @@ _trial_temp*/
.DS_Store
__pycache__/
-# We do want the poetry lockfile.
+# We do want the poetry and cargo lockfiles.
!poetry.lock
+!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -60,3 +61,10 @@ book/
# complement
/complement-*
/master.tar.gz
+
+# rust
+/target/
+/synapse/*.so
+
+# Poetry will create a setup.py, which we don't want to include.
+/setup.py
diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 0000000000..bf96e7743d
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1 @@
+group_imports = "StdExternalCrate"
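
`group_imports = "StdExternalCrate"` tells rustfmt to sort `use` statements into three blocks: standard-library imports first, then external crates, then imports from the current crate (at the time of writing this is an unstable rustfmt option, so it may require a nightly toolchain). A minimal sketch of the resulting ordering, using hypothetical item names and assuming `serde_json` is declared as a dependency:

```rust
// Sketch of the import ordering `group_imports = "StdExternalCrate"` enforces.
// The module and item names are hypothetical; `serde_json` stands in for any
// external crate declared in Cargo.toml.

// Group 1: std / core / alloc imports.
use std::collections::BTreeMap;

// Group 2: imports from external crates.
use serde_json::Value;

// Group 3: imports from the current crate (`crate::`, `self::`, `super::`).
use crate::rules::PushRule;

pub mod rules {
    #[derive(Debug)]
    pub struct PushRule {
        pub rule_id: String,
    }
}

/// Index rules by their ID, to give the std import above something to do.
pub fn index_rules(rules: Vec<PushRule>) -> BTreeMap<String, PushRule> {
    rules.into_iter().map(|r| (r.rule_id.clone(), r)).collect()
}

/// Render a rule ID as JSON, exercising the external-crate import.
pub fn rule_id_as_json(rule: &PushRule) -> Value {
    serde_json::json!({ "rule_id": rule.rule_id })
}
```
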
diff --git a/CHANGES.md b/CHANGES.md
index 0e69f25e0e..d1997f7379 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,916 @@
+Synapse 1.72.0 (2022-11-22)
+===========================
+
+Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
+
+Bugfixes
+--------
+
+- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))
+
+
+Synapse 1.72.0rc1 (2022-11-16)
+==============================
+
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
+
+
+Bugfixes
+--------
+
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in 1.66 which prevented certain pushrules from being sent to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add an additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust the general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
+
+
+Improved Documentation
+----------------------
+
+- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
+- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
+- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
+- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and the effects of using them. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+
+
+Internal Changes
+----------------
+
+- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
+- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
+- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
+- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
+- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
+- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
+- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
+- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
+- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
+- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
+- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
+- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
+- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
+
+<details>
+<summary>Dependency updates</summary>
+
+Runtime:
+
+- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
+- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
+- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
+- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
+- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
+- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
+- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
+- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
+
+Tooling and CI:
+
+- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
+- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
+- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
+- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
+- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
+- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
+- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
+- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
+</details>
+
+Synapse 1.70.1 (2022-10-28)
+===========================
+
+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
+- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+
+
+Synapse 1.70.0 (2022-10-26)
+===========================
+
+No significant changes since 1.70.0rc2.
+
+
+Synapse 1.70.0rc2 (2022-10-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
+- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
+- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
+
+
+Internal Changes
+----------------
+
+- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
+- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
+
+
+Synapse 1.70.0rc1 (2022-10-19)
+==============================
+
+Features
+--------
+
+- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
+- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
+- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
+- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
+- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
+- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
+- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
+- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
+- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
+- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
+
+
+Bugfixes
+--------
+
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
+- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
+- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
+- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
+- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
+- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
+- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
+- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
+- Set `LD_PRELOAD` to use jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
+- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
+
+
+Improved Documentation
+----------------------
+
+- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
+- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
+- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
+- Clarify that ending a changelog entry in a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
+- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/14124). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
+- Remove unneeded `replication` listener from the docker compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
+- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
+- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
+- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
+- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+
+
+Internal Changes
+----------------
+
+- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
+- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
+- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
+- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
+- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
+- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
+- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
+- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
+- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
+- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
+- Cache Rust build cache when building docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
+- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
+- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
+- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
+- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
+- Enable url previews when testing with complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
+- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
+- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
+- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
+- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
+- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
+
+
+Synapse 1.69.0 (2022-10-17)
+===========================
+
+Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
+Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+
+
+No significant changes since 1.69.0rc4.
+
+
+Synapse 1.69.0rc4 (2022-10-14)
+==============================
+
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))
+
+
+Internal Changes
+----------------
+
+- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
+- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
+- Temporarily pin build-system requirements to work around an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079). ([\#14080](https://github.com/matrix-org/synapse/issues/14080))
+
+
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially backoff from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track of when an event pulled over federation fails its signature check so we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
+
+Bugfixes
+--------
+
+- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix the `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify urls in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from Github's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump versions of GitHub actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
+Synapse 1.68.0 (2022-09-27)
+===========================
+
+Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
+
+In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
+Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
+
+Bugfixes
+--------
+
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+
+
+Synapse 1.68.0rc2 (2022-09-23)
+==============================
+
+Bugfixes
+--------
+
+- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
+
+
+Internal Changes
+----------------
+
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
+- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
+
+
+Synapse 1.68.0rc1 (2022-09-20)
+==============================
+
+Features
+--------
+
+- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
+- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
+- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
+- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
+- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
+- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
+- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
+- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
+- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
+- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
+- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
+- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
+- Delete associated data from `event_failed_pull_attempts`, `insertion_events` and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
+
+
+Improved Documentation
+----------------------
+
+- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
+- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
+- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
+- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
+- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory to resolve Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
+- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
+- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
+
+
+Internal Changes
+----------------
+
+- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
+- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
+- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
+- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
+- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
+- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
+- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
+- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
+- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
+- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
+- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
+- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
+- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
+- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
+- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
+- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
+- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
+- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
+- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
+- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
+- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
+- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
+- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
+- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
+- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
+- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
+- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
+- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
+- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
+- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
+
+
+Synapse 1.67.0 (2022-09-13)
+===========================
+
+This release removes using the deprecated direct TCP replication configuration
+for workers. Server admins should use Redis instead. See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+The minimum version of `poetry` supported for managing source checkouts is now
+1.2.0.
+
+**Notice:** from the next major release (1.68.0), installing Synapse from a source
+checkout will require a recent Rust compiler. Those using packages or
+`pip install matrix-synapse` will not be affected. See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+**Notice:** from the next major release (1.68.0), running Synapse with a SQLite
+database will require SQLite version 3.27.0 or higher. (The [current minimum
+ version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
+See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.
+
+
+No significant changes since 1.67.0rc1.
+
+
+Synapse 1.67.0rc1 (2022-09-06)
+==============================
+
+Features
+--------
+
+- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
+- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a SQLite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
+- Fix a bug where a user could not `/forget` a room after its last member had left. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
+- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
+- Fix a bug where the current stream position could be loaded behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
+- Fix a long-standing bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
+- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
+- Fix a long-standing bug where media downloaded for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
+- Fix a bug introduced in Synapse 1.22.0 where media plugins could become wedged if clients disconnected early. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
+- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
+- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update the Docker image to use a stable version of Poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))
+
+
+Improved Documentation
+----------------------
+
+- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
+- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
+- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
+- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
+- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
+- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
+- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
+- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
+- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
+- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
+- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
+
+
+Internal Changes
+----------------
+
+- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
+- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
+- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
+- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
+- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
+- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
+- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
+- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
+- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
+- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
+- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
+- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
+- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
+- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
+- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
+- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
+- Update the Poetry lock file for Poetry 1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
+- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
+- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
+- Only run trial CI on all Python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
+- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
+- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
+
+
+Synapse 1.66.0 (2022-08-31)
+===========================
+
+No significant changes since 1.66.0rc2.
+
+This release removes the ability for homeservers to delegate email ownership
+verification and password reset confirmation to identity servers. This removal
+was originally planned for Synapse 1.64, but was later deferred until now. See
+the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
+
+Deployments with multiple workers should note that the direct TCP replication
+configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
+1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
+type (not to be confused with the `replication` resource on the `http` listener
+type) and the `worker_replication_port` config option will be removed.
+
+To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration),
+then remove the TCP `replication` listener from the config of the master and
+`worker_replication_port` from the worker config. Note that an HTTP listener with
+a `replication` resource is still required. See the
+[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html)
+for more details.
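+For illustration, a minimal sketch of what the post-migration shared configuration
+might look like is shown below. The Redis host/port and the listener port are
+placeholders; adjust them to your deployment and treat the worker documentation as
+authoritative.
+
+```yaml
+# Shared configuration: enable Redis-based replication.
+redis:
+  enabled: true
+  host: localhost   # placeholder Redis host
+  port: 6379        # placeholder Redis port
+
+# Main process: keep an HTTP listener exposing the `replication` resource.
+# (The old TCP `replication` listener and `worker_replication_port` are removed.)
+listeners:
+  - port: 9093      # placeholder port
+    type: http
+    resources:
+      - names: [replication]
+```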
+
+
+Synapse 1.66.0rc2 (2022-08-30)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))
+
+
+Synapse 1.66.0rc1 (2022-08-23)
+==============================
+
+Features
+--------
+
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
+- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
+- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
+- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
+- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))
+
+
+Bugfixes
+--------
+
+- Faster room joins: make `/joined_members` block whilst the room is partial-stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
+- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525))
+- Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566))
+- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574))
+
+
+Improved Documentation
+----------------------
+
+- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472))
+- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491))
+- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492))
+- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497))
+- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515))
+- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536))
+- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the ability for homeservers to delegate email ownership verification
+ and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
+
+Internal Changes
+----------------
+
+### Faster room joins
+
+- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459))
+- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477))
+- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531))
+
+### Metrics and tracing
+
+- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453))
+- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489))
+- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554))
+- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533))
+- Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541))
+- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584))
+- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544))
+- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545))
+
+### Everything else
+
+- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024))
+- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471))
+- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474))
+- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488))
+- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485))
+- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493))
+- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502))
+- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543))
+- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
+- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558))
+
+
+Synapse 1.65.0 (2022-08-16)
+===========================
+
+No significant changes since 1.65.0rc2.
+
+
+Synapse 1.65.0rc2 (2022-08-11)
+==============================
+
+Internal Changes
+----------------
+
+- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501))
+
+
+Synapse 1.65.0rc1 (2022-08-09)
+==============================
+
+Features
+--------
+
+- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273))
+- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343))
+- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370))
+- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428))
+- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
+- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
+
+
+Bugfixes
+--------
+
+- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
+- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
+- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
+- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
+- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
+- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
+- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
+
+
+Updates to the Docker image
+---------------------------
+
+- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
+
+
+Improved Documentation
+----------------------
+
+- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
+- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
+- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
+- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
+- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
+- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
+- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
+- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
+- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
+
+
+Internal Changes
+----------------
+
+- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
+- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
+- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
+- Fix long-standing (but never hit) bugged logic in `get_pdu` which asked every remote destination even after the event had been found. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
+- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
+- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
+- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
+- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
+- Add missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
+- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
+- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
+- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
+- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
+- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
+- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
+- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
+- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
+- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
+- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
+- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
+- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
+- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
+
+
Synapse 1.64.0 (2022-08-02)
===========================
@@ -7,7 +920,7 @@ No significant changes since 1.64.0rc2.
Deprecation Warning
-------------------
-Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
+Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
@@ -16,7 +929,7 @@ If you require your homeserver to verify e-mail addresses or to support password
Synapse 1.64.0rc2 (2022-07-29)
==============================
-This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
+This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
Synapse 1.64.0rc1 (2022-07-26)
@@ -211,6 +1124,20 @@ No significant changes since 1.62.0rc3.
Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.
+## Security advisory
+
+The following issue is fixed in 1.62.0.
+
+* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152)
+
+ Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers.
+
+ Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected.
+
+ Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher.
+
+ Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088).
+
Synapse 1.62.0rc3 (2022-07-04)
==============================
@@ -251,7 +1178,7 @@ Bugfixes
- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
- in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
+ in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
@@ -844,7 +1771,7 @@ If you have already upgraded to Synapse 1.57.0 without problem, then you have no
Updates to the Docker image
---------------------------
-- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse v1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
+- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
Synapse 1.57.0 (2022-04-19)
@@ -1096,10 +2023,10 @@ Features
Bugfixes
--------
-- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
-- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
@@ -1484,15 +2411,15 @@ Bugfixes
- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
-- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse v1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
+- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
-- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
-- Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
+- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
+- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
-- Fix a bug introduced in Synapse v1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
Improved Documentation
@@ -1572,8 +2499,8 @@ This release candidate fixes a federation-breaking regression introduced in Syna
Bugfixes
--------
-- Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
-- Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
+- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
+- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
Improved Documentation
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000..428cabc39a
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,466 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
+
+[[package]]
+name = "arc-swap"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "blake2"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "indoc"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
+
+[[package]]
+name = "itoa"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.135"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
+
+[[package]]
+name = "lock_api"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
+
+[[package]]
+name = "parking_lot"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-sys",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "pyo3"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
+dependencies = [
+ "anyhow",
+ "cfg-if",
+ "indoc",
+ "libc",
+ "memoffset",
+ "parking_lot",
+ "pyo3-build-config",
+ "pyo3-ffi",
+ "pyo3-macros",
+ "unindent",
+]
+
+[[package]]
+name = "pyo3-build-config"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
+dependencies = [
+ "once_cell",
+ "target-lexicon",
+]
+
+[[package]]
+name = "pyo3-ffi"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
+dependencies = [
+ "libc",
+ "pyo3-build-config",
+]
+
+[[package]]
+name = "pyo3-log"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
+dependencies = [
+ "arc-swap",
+ "log",
+ "pyo3",
+]
+
+[[package]]
+name = "pyo3-macros"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
+dependencies = [
+ "proc-macro2",
+ "pyo3-macros-backend",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pyo3-macros-backend"
+version = "0.17.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pythonize"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
+dependencies = [
+ "pyo3",
+ "serde",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+
+[[package]]
+name = "ryu"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "serde"
+version = "1.0.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.88"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+
+[[package]]
+name = "subtle"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
+
+[[package]]
+name = "syn"
+version = "1.0.102"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "synapse"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "blake2",
+ "hex",
+ "lazy_static",
+ "log",
+ "pyo3",
+ "pyo3-log",
+ "pythonize",
+ "regex",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
+
+[[package]]
+name = "typenum"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
+
+[[package]]
+name = "unindent"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000000..de141bdee9
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,5 @@
+# We make the whole Synapse folder a workspace so that we can run `cargo`
+# commands from the root (rather than having to cd into rust/).
+
+[workspace]
+members = ["rust"]
diff --git a/README.rst b/README.rst
index 219e32de8e..d116cd51fb 100644
--- a/README.rst
+++ b/README.rst
@@ -2,107 +2,111 @@
Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================
+Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
+maintained by the Matrix.org Foundation. We began rapid development in 2014,
+reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
+in earnest today.
+
+Briefly, Matrix is an open standard for communications on the internet, supporting
+federation, encryption and VoIP. Matrix.org has more to say about the `goals of the
+Matrix project <https://matrix.org/docs/guides/introduction>`_, and the `formal specification
+<https://spec.matrix.org/>`_ describes the technical details.
+
.. contents::
-Introduction
-============
+Installation and configuration
+==============================
-Matrix is an ambitious new ecosystem for open federated Instant Messaging and
-VoIP. The basics you need to know to get up and running are:
+The Synapse documentation describes `how to install Synapse <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_. We recommend using
+`Docker images <https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks>`_ or `Debian packages from Matrix.org
+<https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages>`_.
-- Everything in Matrix happens in a room. Rooms are distributed and do not
- exist on any single server. Rooms can be located using convenience aliases
- like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
+.. _federation:
-- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
- you will normally refer to yourself and others using a third party identifier
- (3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
+Synapse has a variety of `config options
+<https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html>`_
+which can be used to customise its behaviour after installation.
+There are additional details on how to `configure Synapse for federation here
+<https://matrix-org.github.io/synapse/latest/federate.html>`_.
-The overall architecture is::
+.. _reverse-proxy:
- client <----> homeserver <=====================> homeserver <----> client
- https://somewhere.org/_matrix https://elsewhere.net/_matrix
+Using a reverse proxy with Synapse
+----------------------------------
-``#matrix:matrix.org`` is the official support room for Matrix, and can be
-accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
-via IRC bridge at irc://irc.libera.chat/matrix.
+It is recommended to put a reverse proxy such as
+`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
+`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
+`HAProxy <https://www.haproxy.org/>`_ or
+`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
+doing so is that it means that you can expose the default https port (443) to
+Matrix clients without needing to run Synapse with root privileges.
+For information on configuring one, see `the reverse proxy docs
+<https://matrix-org.github.io/synapse/latest/reverse_proxy.html>`_.
-Synapse is currently in rapid development, but as of version 0.5 we believe it
-is sufficiently stable to be run as an internet-facing service for real usage!
+Upgrading an existing Synapse
+-----------------------------
-About Matrix
-============
+The instructions for upgrading Synapse are in `the upgrade notes`_.
+Please check these instructions as upgrading may require extra steps for some
+versions of Synapse.
-Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
-which handle:
+.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
-- Creating and managing fully distributed chat rooms with no
- single points of control or failure
-- Eventually-consistent cryptographically secure synchronisation of room
- state across a global open network of federated servers and services
-- Sending and receiving extensible messages in a room with (optional)
- end-to-end encryption
-- Inviting, joining, leaving, kicking, banning room members
-- Managing user accounts (registration, login, logout)
-- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
- Facebook accounts to authenticate, identify and discover users on Matrix.
-- Placing 1:1 VoIP and Video calls
-These APIs are intended to be implemented on a wide range of servers, services
-and clients, letting developers build messaging and VoIP functionality on top
-of the entirely open Matrix ecosystem rather than using closed or proprietary
-solutions. The hope is for Matrix to act as the building blocks for a new
-generation of fully open and interoperable messaging and VoIP apps for the
-internet.
+Platform dependencies
+---------------------
-Synapse is a Matrix "homeserver" implementation developed by the matrix.org core
-team, written in Python 3/Twisted.
+Synapse uses a number of platform dependencies such as Python and PostgreSQL,
+and aims to follow supported upstream versions. See the
+`deprecation policy <https://matrix-org.github.io/synapse/latest/deprecation_policy.html>`_
+for more details.
-In Matrix, every user runs one or more Matrix clients, which connect through to
-a Matrix homeserver. The homeserver stores all their personal chat history and
-user account information - much as a mail client connects through to an
-IMAP/SMTP server. Just like email, you can either run your own Matrix
-homeserver and control and own your own communications and history or use one
-hosted by someone else (e.g. matrix.org) - there is no single point of control
-or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
-etc.
-We'd like to invite you to join #matrix:matrix.org (via
-https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
-at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
-`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
-<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
+Security note
+-------------
-Thanks for using Matrix!
+Matrix serves raw, user-supplied data in some APIs -- specifically the `content
+repository endpoints`_.
-Support
-=======
+.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
-For support installing or managing Synapse, please join |room|_ (from a matrix.org
-account if necessary) and ask questions there. We do not use GitHub issues for
-support requests, only for bug reports and feature requests.
+Whilst we make a reasonable effort to mitigate against XSS attacks (for
+instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
+domain hosting other web applications. This especially applies to sharing
+the domain with Matrix web clients and other sensitive applications like
+webmail. See
+https://developer.github.com/changes/2014-04-25-user-content-security for more
+information.
-Synapse's documentation is `nicely rendered on GitHub Pages <https://matrix-org.github.io/synapse>`_,
-with its source available in |docs|_.
+.. _CSP: https://github.com/matrix-org/synapse/pull/1021
-.. |room| replace:: ``#synapse:matrix.org``
-.. _room: https://matrix.to/#/#synapse:matrix.org
+Ideally, the homeserver should not simply be on a different subdomain, but on
+a completely different `registered domain`_ (also known as top-level site or
+eTLD+1). This is because `some attacks`_ are still possible as long as the two
+applications share the same registered domain.
-.. |docs| replace:: ``docs``
-.. _docs: docs
+.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
-Synapse Installation
-====================
+.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
-.. _federation:
+To illustrate this with an example, if your Element Web or other sensitive web
+application is hosted on ``A.example1.com``, you should ideally host Synapse on
+``example2.com``. Some amount of protection is offered by hosting on
+``B.example1.com`` instead, so this is also acceptable in some scenarios.
+However, you should *not* host your Synapse on ``A.example1.com``.
+
+Note that all of the above refers exclusively to the domain used in Synapse's
+``public_baseurl`` setting. In particular, it has no bearing on the domain
+mentioned in MXIDs hosted on that server.
-* For details on how to install synapse, see
- `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
-* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
+Following this advice ensures that even if an XSS is found in Synapse, the
+impact to other applications will be minimal.
-Connecting to Synapse from a client
-===================================
+Testing a new installation
+==========================
The easiest way to try out your new Synapse installation is by connecting to it
from a web client.
@@ -129,11 +133,20 @@ Registering a new user from a client
------------------------------------
By default, registration of new users via Matrix clients is disabled. To enable
-it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
-recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
+it:
+
+1. In the
+   `registration config section <https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration>`_,
+   set ``enable_registration: true`` in ``homeserver.yaml``.
+2. Then **either**:
-Once ``enable_registration`` is set to ``true``, it is possible to register a
-user via a Matrix client.
+ a. set up a `CAPTCHA <https://matrix-org.github.io/synapse/latest/CAPTCHA_SETUP.html>`_, or
+ b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.
+
+We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
+the public internet. Without it, anyone can freely register accounts on your homeserver.
+This can be exploited by attackers to create spambots targeting the rest of the Matrix
+federation.
Your new user name will be formed partly from the ``server_name``, and partly
from a localpart you specify when you create the account. Your name will take
@@ -146,71 +159,22 @@ the form of::
As when logging in, you will need to specify a "Custom server". Specify your
desired ``localpart`` in the 'User name' box.
-Security note
-=============
+Troubleshooting and support
+===========================
-Matrix serves raw, user-supplied data in some APIs -- specifically the `content
-repository endpoints`_.
+The `Admin FAQ <https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html>`_
+includes tips on dealing with some common problems. For more details, see
+`Synapse's wider documentation <https://matrix-org.github.io/synapse/latest/>`_.
-.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
-
-Whilst we make a reasonable effort to mitigate against XSS attacks (for
-instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
-domain hosting other web applications. This especially applies to sharing
-the domain with Matrix web clients and other sensitive applications like
-webmail. See
-https://developer.github.com/changes/2014-04-25-user-content-security for more
-information.
-
-.. _CSP: https://github.com/matrix-org/synapse/pull/1021
-
-Ideally, the homeserver should not simply be on a different subdomain, but on
-a completely different `registered domain`_ (also known as top-level site or
-eTLD+1). This is because `some attacks`_ are still possible as long as the two
-applications share the same registered domain.
-
-.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
-
-.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
+For additional support installing or managing Synapse, please ask in the community
+support room |room|_ (from a matrix.org account if necessary). We do not use GitHub
+issues for support requests, only for bug reports and feature requests.
-To illustrate this with an example, if your Element Web or other sensitive web
-application is hosted on ``A.example1.com``, you should ideally host Synapse on
-``example2.com``. Some amount of protection is offered by hosting on
-``B.example1.com`` instead, so this is also acceptable in some scenarios.
-However, you should *not* host your Synapse on ``A.example1.com``.
-
-Note that all of the above refers exclusively to the domain used in Synapse's
-``public_baseurl`` setting. In particular, it has no bearing on the domain
-mentioned in MXIDs hosted on that server.
-
-Following this advice ensures that even if an XSS is found in Synapse, the
-impact to other applications will be minimal.
-
-
-Upgrading an existing Synapse
-=============================
-
-The instructions for upgrading synapse are in `the upgrade notes`_.
-Please check these instructions as upgrading may require extra steps for some
-versions of synapse.
-
-.. _the upgrade notes: https://matrix-org.github.io/synapse/develop/upgrade.html
-
-.. _reverse-proxy:
-
-Using a reverse proxy with Synapse
-==================================
-
-It is recommended to put a reverse proxy such as
-`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
-`Caddy <https://caddyserver.com/docs/quick-starts/reverse-proxy>`_,
-`HAProxy <https://www.haproxy.org/>`_ or
-`relayd <https://man.openbsd.org/relayd.8>`_ in front of Synapse. One advantage of
-doing so is that it means that you can expose the default https port (443) to
-Matrix clients without needing to run Synapse with root privileges.
+.. |room| replace:: ``#synapse:matrix.org``
+.. _room: https://matrix.to/#/#synapse:matrix.org
-For information on configuring one, see `<docs/reverse_proxy.md>`_.
+.. |docs| replace:: ``docs``
+.. _docs: docs
Identity Servers
================
@@ -242,34 +206,15 @@ an email address with your account, or send an invite to another user via their
email address.
-Password reset
-==============
-
-Users can reset their password through their client. Alternatively, a server admin
-can reset a users password using the `admin API <docs/admin_api/user_admin_api.md#reset-password>`_
-or by directly editing the database as shown below.
-
-First calculate the hash of the new password::
-
- $ ~/synapse/env/bin/hash_password
- Password:
- Confirm password:
- $2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-
-Then update the ``users`` table in the database::
-
- UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
- WHERE name='@test:test.com';
-
-
-Synapse Development
-===================
+Development
+===========
+We welcome contributions to Synapse from the community!
The best place to get started is our
`guide for contributors <https://matrix-org.github.io/synapse/latest/development/contributing_guide.html>`_.
This is part of our larger `documentation <https://matrix-org.github.io/synapse/latest>`_, which includes
-information for synapse developers as well as synapse administrators.
+information for Synapse developers as well as Synapse administrators.
Developers might be particularly interested in:
* `Synapse's database schema <https://matrix-org.github.io/synapse/latest/development/database_schema.html>`_,
@@ -280,187 +225,6 @@ Alongside all that, join our developer community on Matrix:
`#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans!
-Quick start
------------
-
-Before setting up a development environment for synapse, make sure you have the
-system dependencies (such as the python header files) installed - see
-`Platform-specific prerequisites <https://matrix-org.github.io/synapse/latest/setup/installation.html#platform-specific-prerequisites>`_.
-
-To check out a synapse for development, clone the git repo into a working
-directory of your choice::
-
- git clone https://github.com/matrix-org/synapse.git
- cd synapse
-
-Synapse has a number of external dependencies. We maintain a fixed development
-environment using `Poetry <https://python-poetry.org/>`_. First, install poetry. We recommend::
-
- pip install --user pipx
- pipx install poetry
-
-as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
-(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
-for other installation methods.) Then ask poetry to create a virtual environment
-from the project and install Synapse's dependencies::
-
- poetry install --extras "all test"
-
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env.
-
-We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`::
-
- poetry run ./demo/start.sh
-
-(to stop, you can use ``poetry run ./demo/stop.sh``)
-
-See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
-for more information.
-
-If you just want to start a single instance of the app and run it directly::
-
- # Create the homeserver.yaml config once
- poetry run synapse_homeserver \
- --server-name my.domain.name \
- --config-path homeserver.yaml \
- --generate-config \
- --report-stats=[yes|no]
-
- # Start the app
- poetry run synapse_homeserver --config-path homeserver.yaml
-
-
-Running the unit tests
-----------------------
-
-After getting up and running, you may wish to run Synapse's unit tests to
-check that everything is installed correctly::
-
- poetry run trial tests
-
-This should end with a 'PASSED' result (note that exact numbers will
-differ)::
-
- Ran 1337 tests in 716.064s
-
- PASSED (skips=15, successes=1322)
-
-For more tips on running the unit tests, like running a specific test or
-to see the logging output, see the `CONTRIBUTING doc <CONTRIBUTING.md#run-the-unit-tests>`_.
-
-
-Running the Integration Tests
------------------------------
-
-Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
-a Matrix homeserver integration testing suite, which uses HTTP requests to
-access the API as a Matrix client would. It is able to run Synapse directly from
-the source tree, so installation of the server is not required.
-
-Testing with SyTest is recommended for verifying that changes related to the
-Client-Server API are functioning correctly. See the `SyTest installation
-instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
-
-
-Platform dependencies
-=====================
-
-Synapse uses a number of platform dependencies such as Python and PostgreSQL,
-and aims to follow supported upstream versions. See the
-`<docs/deprecation_policy.md>`_ document for more details.
-
-
-Troubleshooting
-===============
-
-Need help? Join our community support room on Matrix:
-`#synapse:matrix.org <https://matrix.to/#/#synapse:matrix.org>`_
-
-Running out of File Handles
----------------------------
-
-If synapse runs out of file handles, it typically fails badly - live-locking
-at 100% CPU, and/or failing to accept new TCP connections (blocking the
-connecting client). Matrix currently can legitimately use a lot of file handles,
-thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
-servers. The first time a server talks in a room it will try to connect
-simultaneously to all participating servers, which could exhaust the available
-file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
-to respond. (We need to improve the routing algorithm used to be better than
-full mesh, but as of March 2019 this hasn't happened yet).
-
-If you hit this failure mode, we recommend increasing the maximum number of
-open file handles to be at least 4096 (assuming a default of 1024 or 256).
-This is typically done by editing ``/etc/security/limits.conf``
-
-Separately, Synapse may leak file handles if inbound HTTP requests get stuck
-during processing - e.g. blocked behind a lock or talking to a remote server etc.
-This is best diagnosed by matching up the 'Received request' and 'Processed request'
-log lines and looking for any 'Processed request' lines which take more than
-a few seconds to execute. Please let us know at #synapse:matrix.org if
-you see this failure mode so we can help debug it, however.
-
-Help!! Synapse is slow and eats all my RAM/CPU!
------------------------------------------------
-
-First, ensure you are running the latest version of Synapse, using Python 3
-with a PostgreSQL database.
-
-Synapse's architecture is quite RAM hungry currently - we deliberately
-cache a lot of recent room data and metadata in RAM in order to speed up
-common requests. We'll improve this in the future, but for now the easiest
-way to either reduce the RAM usage (at the risk of slowing things down)
-is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
-variable. The default is 0.5, which can be decreased to reduce RAM usage
-in memory constrained enviroments, or increased if performance starts to
-degrade.
-
-However, degraded performance due to a low cache factor, common on
-machines with slow disks, often leads to explosions in memory use due
-backlogged requests. In this case, reducing the cache factor will make
-things worse. Instead, try increasing it drastically. 2.0 is a good
-starting value.
-
-Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
-improvement in overall memory use, and especially in terms of giving back
-RAM to the OS. To use it, the library must simply be put in the
-LD_PRELOAD environment variable when launching Synapse. On Debian, this
-can be done by installing the ``libjemalloc1`` package and adding this
-line to ``/etc/default/matrix-synapse``::
-
- LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
-
-This can make a significant difference on Python 2.7 - it's unclear how
-much of an improvement it provides on Python 3.x.
-
-If you're encountering high CPU use by the Synapse process itself, you
-may be affected by a bug with presence tracking that leads to a
-massive excess of outgoing federation requests (see `discussion
-<https://github.com/matrix-org/synapse/issues/3971>`_). If metrics
-indicate that your server is also issuing far more outgoing federation
-requests than can be accounted for by your users' activity, this is a
-likely cause. The misbehavior can be worked around by setting
-the following in the Synapse config file:
-
-.. code-block:: yaml
-
- presence:
- enabled: false
-
-People can't accept room invitations from me
---------------------------------------------
-
-The typical failure mode here is that you send an invitation to someone
-to join a room or direct chat, but when they go to accept it, they get an
-error (typically along the lines of "Invalid signature"). They might see
-something like the following in their logs::
-
- 2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>
-
-This is normally caused by a misconfiguration in your reverse-proxy. See
-`<docs/reverse_proxy.md>`_ and double-check that your settings are correct.
-
.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
:alt: (get support on #synapse:matrix.org)
:target: https://matrix.to/#/#synapse:matrix.org
diff --git a/build_rust.py b/build_rust.py
new file mode 100644
index 0000000000..662474dcb4
--- /dev/null
+++ b/build_rust.py
@@ -0,0 +1,23 @@
+# A build script for poetry that adds the Rust extension.
+
+import os
+from typing import Any, Dict
+
+from setuptools_rust import Binding, RustExtension
+
+
+def build(setup_kwargs: Dict[str, Any]) -> None:
+ original_project_dir = os.path.dirname(os.path.realpath(__file__))
+ cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")
+
+ extension = RustExtension(
+ target="synapse.synapse_rust",
+ path=cargo_toml_path,
+ binding=Binding.PyO3,
+ py_limited_api=True,
+ # We force always building in release mode, as we can't tell the
+ # difference between using `poetry` in development vs production.
+ debug=False,
+ )
+ setup_kwargs.setdefault("rust_extensions", []).append(extension)
+ setup_kwargs["zip_safe"] = False
diff --git a/changelog.d/12978.misc b/changelog.d/12978.misc
deleted file mode 100644
index 050c9047fc..0000000000
--- a/changelog.d/12978.misc
+++ /dev/null
@@ -1 +0,0 @@
-Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process.
\ No newline at end of file
diff --git a/changelog.d/13160.misc b/changelog.d/13160.misc
deleted file mode 100644
index 36ff50c2a6..0000000000
--- a/changelog.d/13160.misc
+++ /dev/null
@@ -1 +0,0 @@
-Make minor clarifications to the error messages given when we fail to join a room via any server.
\ No newline at end of file
diff --git a/changelog.d/13213.misc b/changelog.d/13213.misc
deleted file mode 100644
index b50d26ac0c..0000000000
--- a/changelog.d/13213.misc
+++ /dev/null
@@ -1 +0,0 @@
-Enable Complement CI tests in the 'latest deps' test run.
\ No newline at end of file
diff --git a/changelog.d/13221.doc b/changelog.d/13221.doc
deleted file mode 100644
index dd2b3d8972..0000000000
--- a/changelog.d/13221.doc
+++ /dev/null
@@ -1 +0,0 @@
-Document which HTTP resources support gzip compression.
diff --git a/changelog.d/13230.doc b/changelog.d/13230.doc
deleted file mode 100644
index dce7be2425..0000000000
--- a/changelog.d/13230.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add steps describing how to elevate an existing user to administrator by manipulating the database.
diff --git a/changelog.d/13343.feature b/changelog.d/13343.feature
deleted file mode 100644
index c151251e54..0000000000
--- a/changelog.d/13343.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in MSC3848.
\ No newline at end of file
diff --git a/changelog.d/13346.misc b/changelog.d/13346.misc
deleted file mode 100644
index 06557c8481..0000000000
--- a/changelog.d/13346.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event.
diff --git a/changelog.d/13353.bugfix b/changelog.d/13353.bugfix
deleted file mode 100644
index 8e18bfae1f..0000000000
--- a/changelog.d/13353.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop.
diff --git a/changelog.d/13355.misc b/changelog.d/13355.misc
deleted file mode 100644
index 7715075885..0000000000
--- a/changelog.d/13355.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: avoid blocking when pulling events with partially missing prev events.
diff --git a/changelog.d/13365.bugfix b/changelog.d/13365.bugfix
deleted file mode 100644
index b915c3158c..0000000000
--- a/changelog.d/13365.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).
diff --git a/changelog.d/13370.feature b/changelog.d/13370.feature
deleted file mode 100644
index 3a49bc2778..0000000000
--- a/changelog.d/13370.feature
+++ /dev/null
@@ -1 +0,0 @@
-Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827).
diff --git a/changelog.d/13372.docker b/changelog.d/13372.docker
deleted file mode 100644
index 238c78de09..0000000000
--- a/changelog.d/13372.docker
+++ /dev/null
@@ -1 +0,0 @@
-Make docker images build on armv7 by installing cryptography dependencies in the "requirements" stage. Contributed by Jasper Spaans.
\ No newline at end of file
diff --git a/changelog.d/13374.bugfix b/changelog.d/13374.bugfix
deleted file mode 100644
index 1c5bd1b363..0000000000
--- a/changelog.d/13374.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh.
\ No newline at end of file
diff --git a/changelog.d/13383.misc b/changelog.d/13383.misc
deleted file mode 100644
index 2236eced24..0000000000
--- a/changelog.d/13383.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove an unused argument to `get_relations_for_event`.
diff --git a/changelog.d/13392.bugfix b/changelog.d/13392.bugfix
deleted file mode 100644
index 7d83c77550..0000000000
--- a/changelog.d/13392.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13393.misc b/changelog.d/13393.misc
deleted file mode 100644
index be2b0153ea..0000000000
--- a/changelog.d/13393.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a `merge-back` command to the release script, which automates merging the correct branches after a release.
\ No newline at end of file
diff --git a/changelog.d/13397.misc b/changelog.d/13397.misc
deleted file mode 100644
index 8dc610d9e2..0000000000
--- a/changelog.d/13397.misc
+++ /dev/null
@@ -1 +0,0 @@
-Adding missing type hints to tests.
diff --git a/changelog.d/13403.misc b/changelog.d/13403.misc
deleted file mode 100644
index cb7b38153c..0000000000
--- a/changelog.d/13403.misc
+++ /dev/null
@@ -1 +0,0 @@
-Faster Room Joins: don't leave a stuck room partial state flag if the join fails.
diff --git a/changelog.d/13404.misc b/changelog.d/13404.misc
deleted file mode 100644
index 655be4061b..0000000000
--- a/changelog.d/13404.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead.
diff --git a/changelog.d/13413.bugfix b/changelog.d/13413.bugfix
deleted file mode 100644
index a0ce884274..0000000000
--- a/changelog.d/13413.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing.
\ No newline at end of file
diff --git a/changelog.d/13420.misc b/changelog.d/13420.misc
deleted file mode 100644
index ff1a68e2e8..0000000000
--- a/changelog.d/13420.misc
+++ /dev/null
@@ -1 +0,0 @@
-Re-enable running Complement tests against Synapse with workers.
\ No newline at end of file
diff --git a/changelog.d/13428.feature b/changelog.d/13428.feature
deleted file mode 100644
index 085b61483f..0000000000
--- a/changelog.d/13428.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a module API method to translate a room alias into a room ID.
diff --git a/changelog.d/13431.misc b/changelog.d/13431.misc
deleted file mode 100644
index 655be4061b..0000000000
--- a/changelog.d/13431.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead.
diff --git a/changelog.d/13432.bugfix b/changelog.d/13432.bugfix
deleted file mode 100644
index bb99616afc..0000000000
--- a/changelog.d/13432.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Faster room joins: Fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead.
diff --git a/changelog.d/13437.doc b/changelog.d/13437.doc
deleted file mode 100644
index fb772b24dc..0000000000
--- a/changelog.d/13437.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix wrong headline for `url_preview_accept_language` in documentation.
diff --git a/changelog.d/13438.doc b/changelog.d/13438.doc
deleted file mode 100644
index 163b63ffc6..0000000000
--- a/changelog.d/13438.doc
+++ /dev/null
@@ -1 +0,0 @@
-Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel.
diff --git a/changelog.d/13442.misc b/changelog.d/13442.misc
deleted file mode 100644
index f503bc79d3..0000000000
--- a/changelog.d/13442.misc
+++ /dev/null
@@ -1 +0,0 @@
-Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`.
\ No newline at end of file
diff --git a/changelog.d/13443.doc b/changelog.d/13443.doc
deleted file mode 100644
index 0db5d1b3b4..0000000000
--- a/changelog.d/13443.doc
+++ /dev/null
@@ -1 +0,0 @@
-Update documentation for config setting `macaroon_secret_key`.
\ No newline at end of file
diff --git a/changelog.d/13917.feature b/changelog.d/13917.feature
new file mode 100644
index 0000000000..4eb942ab38
--- /dev/null
+++ b/changelog.d/13917.feature
@@ -0,0 +1 @@
+Adds support for handling avatars in SSO login. Contributed by @ashfame.
diff --git a/changelog.d/14055.misc b/changelog.d/14055.misc
new file mode 100644
index 0000000000..02980bc528
--- /dev/null
+++ b/changelog.d/14055.misc
@@ -0,0 +1 @@
+Add missing type hints to `HomeServer`.
diff --git a/changelog.d/14149.bugfix b/changelog.d/14149.bugfix
new file mode 100644
index 0000000000..b31c658266
--- /dev/null
+++ b/changelog.d/14149.bugfix
@@ -0,0 +1 @@
+Fix #12383: paginate room messages from the start if no `from` parameter is given. Contributed by @gnunicorn.
\ No newline at end of file
diff --git a/changelog.d/14376.misc b/changelog.d/14376.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14376.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14393.bugfix b/changelog.d/14393.bugfix
new file mode 100644
index 0000000000..97177bc62f
--- /dev/null
+++ b/changelog.d/14393.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in 1.58.0 where a user with presence state 'org.matrix.msc3026.busy' would mistakenly be set to 'online' when calling `/sync` or `/events` on a worker process.
\ No newline at end of file
diff --git a/changelog.d/14400.misc b/changelog.d/14400.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14400.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14403.misc b/changelog.d/14403.misc
new file mode 100644
index 0000000000..ff28a2712a
--- /dev/null
+++ b/changelog.d/14403.misc
@@ -0,0 +1 @@
+Faster joins: do not wait for full state when creating events to send.
diff --git a/changelog.d/14404.misc b/changelog.d/14404.misc
new file mode 100644
index 0000000000..b9ab525f2b
--- /dev/null
+++ b/changelog.d/14404.misc
@@ -0,0 +1 @@
+Faster joins: filter out non-local events when a room doesn't have its full state.
diff --git a/changelog.d/14408.misc b/changelog.d/14408.misc
new file mode 100644
index 0000000000..2c77d97591
--- /dev/null
+++ b/changelog.d/14408.misc
@@ -0,0 +1 @@
+Faster joins: send events to initial list of servers if we don't have the full state yet.
diff --git a/changelog.d/14412.misc b/changelog.d/14412.misc
new file mode 100644
index 0000000000..4da061d461
--- /dev/null
+++ b/changelog.d/14412.misc
@@ -0,0 +1 @@
+Remove duplicated type information from type hints.
diff --git a/changelog.d/14449.misc b/changelog.d/14449.misc
new file mode 100644
index 0000000000..320c0b6fae
--- /dev/null
+++ b/changelog.d/14449.misc
@@ -0,0 +1 @@
+Fix type logic in TCP replication code that prevented correctly ignoring blank commands.
\ No newline at end of file
diff --git a/changelog.d/14452.misc b/changelog.d/14452.misc
new file mode 100644
index 0000000000..cb190c0823
--- /dev/null
+++ b/changelog.d/14452.misc
@@ -0,0 +1 @@
+Enable mypy's [`strict_equality` check](https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-strict-equality) by default.
\ No newline at end of file
diff --git a/changelog.d/14468.misc b/changelog.d/14468.misc
new file mode 100644
index 0000000000..2ca326fea6
--- /dev/null
+++ b/changelog.d/14468.misc
@@ -0,0 +1 @@
+Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar).
diff --git a/changelog.d/14476.misc b/changelog.d/14476.misc
new file mode 100644
index 0000000000..6e025329c4
--- /dev/null
+++ b/changelog.d/14476.misc
@@ -0,0 +1 @@
+Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication.
diff --git a/changelog.d/14479.misc b/changelog.d/14479.misc
new file mode 100644
index 0000000000..08edd2f929
--- /dev/null
+++ b/changelog.d/14479.misc
@@ -0,0 +1 @@
+`scripts-dev/federation_client`: Fix routing on servers with `.well-known` files.
\ No newline at end of file
diff --git a/changelog.d/14487.misc b/changelog.d/14487.misc
new file mode 100644
index 0000000000..f6b47a1d8e
--- /dev/null
+++ b/changelog.d/14487.misc
@@ -0,0 +1 @@
+Reduce default third party invite rate limit to 216 invites per day.
diff --git a/changelog.d/14490.misc b/changelog.d/14490.misc
new file mode 100644
index 0000000000..c0a4daa885
--- /dev/null
+++ b/changelog.d/14490.misc
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 0.9 where it would fail to fetch server keys whose IDs contain a forward slash.
diff --git a/changelog.d/14491.feature b/changelog.d/14491.feature
new file mode 100644
index 0000000000..4fca7282f7
--- /dev/null
+++ b/changelog.d/14491.feature
@@ -0,0 +1 @@
+Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.
diff --git a/changelog.d/14496.misc b/changelog.d/14496.misc
new file mode 100644
index 0000000000..57fc6cf452
--- /dev/null
+++ b/changelog.d/14496.misc
@@ -0,0 +1 @@
+Refactor `federation_sender` and `pusher` configuration loading.
diff --git a/changelog.d/14499.doc b/changelog.d/14499.doc
new file mode 100644
index 0000000000..34ea57ef43
--- /dev/null
+++ b/changelog.d/14499.doc
@@ -0,0 +1 @@
+Fixed link to 'Synapse administration endpoints'.
diff --git a/changelog.d/14500.misc b/changelog.d/14500.misc
new file mode 100644
index 0000000000..c5d70a70f7
--- /dev/null
+++ b/changelog.d/14500.misc
@@ -0,0 +1 @@
+Bump pygithub from 1.56 to 1.57.
diff --git a/changelog.d/14501.misc b/changelog.d/14501.misc
new file mode 100644
index 0000000000..3c240d38b5
--- /dev/null
+++ b/changelog.d/14501.misc
@@ -0,0 +1 @@
+Bump sentry-sdk from 1.10.1 to 1.11.0.
diff --git a/changelog.d/14502.misc b/changelog.d/14502.misc
new file mode 100644
index 0000000000..86a19900f1
--- /dev/null
+++ b/changelog.d/14502.misc
@@ -0,0 +1 @@
+Bump types-pillow from 9.2.2.1 to 9.3.0.1.
diff --git a/changelog.d/14503.misc b/changelog.d/14503.misc
new file mode 100644
index 0000000000..e627d35cde
--- /dev/null
+++ b/changelog.d/14503.misc
@@ -0,0 +1 @@
+Bump towncrier from 21.9.0 to 22.8.0.
diff --git a/changelog.d/14504.misc b/changelog.d/14504.misc
new file mode 100644
index 0000000000..e228ee46a5
--- /dev/null
+++ b/changelog.d/14504.misc
@@ -0,0 +1 @@
+Bump phonenumbers from 8.12.56 to 8.13.0.
diff --git a/changelog.d/14505.misc b/changelog.d/14505.misc
new file mode 100644
index 0000000000..45d97ec461
--- /dev/null
+++ b/changelog.d/14505.misc
@@ -0,0 +1 @@
+Bump serde_json from 1.0.87 to 1.0.88.
diff --git a/changelog.d/14508.feature b/changelog.d/14508.feature
new file mode 100644
index 0000000000..4fca7282f7
--- /dev/null
+++ b/changelog.d/14508.feature
@@ -0,0 +1 @@
+Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.
diff --git a/changelog.d/14510.feature b/changelog.d/14510.feature
new file mode 100644
index 0000000000..4fca7282f7
--- /dev/null
+++ b/changelog.d/14510.feature
@@ -0,0 +1 @@
+Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.4/client-server-api/#aggregations) which return bundled aggregations.
diff --git a/changelog.d/14515.misc b/changelog.d/14515.misc
new file mode 100644
index 0000000000..a0effb4dbe
--- /dev/null
+++ b/changelog.d/14515.misc
@@ -0,0 +1 @@
+Faster joins: use the servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`.
\ No newline at end of file
diff --git a/changelog.d/14516.misc b/changelog.d/14516.misc
new file mode 100644
index 0000000000..51666c6ffc
--- /dev/null
+++ b/changelog.d/14516.misc
@@ -0,0 +1 @@
+Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row.
diff --git a/changelog.d/14522.misc b/changelog.d/14522.misc
new file mode 100644
index 0000000000..512bc32567
--- /dev/null
+++ b/changelog.d/14522.misc
@@ -0,0 +1 @@
+Add more prompts to the bug report form.
diff --git a/changelog.d/14526.misc b/changelog.d/14526.misc
new file mode 100644
index 0000000000..84d4ada31b
--- /dev/null
+++ b/changelog.d/14526.misc
@@ -0,0 +1 @@
+Extend editorconfig rules on indent and line length to `.pyi` files.
diff --git a/changelog.d/14527.misc b/changelog.d/14527.misc
new file mode 100644
index 0000000000..3c4c7bf07d
--- /dev/null
+++ b/changelog.d/14527.misc
@@ -0,0 +1 @@
+Speed up `/messages` with `filter_events_for_client` optimizations.
diff --git a/changelog.d/14529.misc b/changelog.d/14529.misc
new file mode 100644
index 0000000000..d44571b731
--- /dev/null
+++ b/changelog.d/14529.misc
@@ -0,0 +1 @@
+Add missing type hints.
diff --git a/changelog.d/14534.misc b/changelog.d/14534.misc
new file mode 100644
index 0000000000..5fe79042e5
--- /dev/null
+++ b/changelog.d/14534.misc
@@ -0,0 +1 @@
+Improve DB performance by reducing the amount of data that gets read in `device_lists_changes_in_room`.
diff --git a/changelog.d/14537.bugfix b/changelog.d/14537.bugfix
new file mode 100644
index 0000000000..d7ce78d032
--- /dev/null
+++ b/changelog.d/14537.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information.
\ No newline at end of file
diff --git a/changelog.d/14538.removal b/changelog.d/14538.removal
new file mode 100644
index 0000000000..d2035ce82a
--- /dev/null
+++ b/changelog.d/14538.removal
@@ -0,0 +1 @@
+Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0.
\ No newline at end of file
diff --git a/changelog.d/14557.misc b/changelog.d/14557.misc
new file mode 100644
index 0000000000..379cd2378c
--- /dev/null
+++ b/changelog.d/14557.misc
@@ -0,0 +1 @@
+Bump dtolnay/rust-toolchain from 55c7845fad90d0ae8b2e83715cb900e5e861e8cb to e645b0cf01249a964ec099494d38d2da0f0b349f.
diff --git a/changelog.d/14571.misc b/changelog.d/14571.misc
new file mode 100644
index 0000000000..212990cb10
--- /dev/null
+++ b/changelog.d/14571.misc
@@ -0,0 +1 @@
+Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates.
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
index 4dbfee2853..bdd3dd32e0 100644
--- a/contrib/docker_compose_workers/README.md
+++ b/contrib/docker_compose_workers/README.md
@@ -94,20 +94,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```
-### Add Workers to `instance_map`
-
-Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
-
-```yaml
-instance_map:
- synapse-generic-worker-1: # The worker_name setting in your worker configuration file
- host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
- port: 8034 # The port assigned to the replication listener in your worker config file
- synapse-federation-sender-1:
- host: synapse-federation-sender-1
- port: 8034
-```
-
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
@@ -122,4 +108,4 @@ federation_sender_instances:
## Other Worker types
-Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
\ No newline at end of file
+Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
index 5ba42a92d2..5b40664d67 100644
--- a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
@@ -5,10 +5,4 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093
-worker_listeners:
- - type: http
- port: 8034
- resources:
- - names: [replication]
-
worker_log_config: /data/federation_sender.log.config
diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
index 694584105a..09e55df9f3 100644
--- a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
@@ -7,10 +7,6 @@ worker_replication_http_port: 9093
worker_listeners:
- type: http
- port: 8034
- resources:
- - names: [replication]
- - type: http
port: 8081
x_forwarded: true
resources:
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index 819426b8ea..68705b6e6d 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -9,17 +9,18 @@
"pluginName": "Prometheus"
}
],
+ "__elements": {},
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
- "version": "7.3.7"
+ "version": "9.2.2"
},
{
"type": "panel",
"id": "graph",
- "name": "Graph",
+ "name": "Graph (old)",
"version": ""
},
{
@@ -33,13 +34,21 @@
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"enable": false,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
@@ -51,10 +60,9 @@
]
},
"editable": true,
- "gnetId": null,
+ "fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
- "iteration": 1628606819564,
"links": [
{
"asDropdown": false,
@@ -66,24 +74,16 @@
],
"title": "Dashboards",
"type": "dashboards"
- },
- {
- "asDropdown": false,
- "icon": "external link",
- "includeVars": false,
- "keepTime": false,
- "tags": [],
- "targetBlank": true,
- "title": "Synapse Documentation",
- "tooltip": "Open Documentation",
- "type": "link",
- "url": "https://matrix-org.github.io/synapse/latest/"
}
],
+ "liveNow": false,
"panels": [
{
"collapsed": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -92,6 +92,15 @@
},
"id": 73,
"panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Overview",
"type": "row"
},
@@ -108,10 +117,21 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -129,9 +149,51 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellRadius": 0,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)",
"format": "heatmap",
"interval": "",
@@ -149,31 +211,24 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
- "decimals": null,
"format": "s",
"logBase": 2,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -207,7 +262,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -266,6 +321,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -273,6 +331,9 @@
"refId": "D"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"format": "time_series",
"interval": "",
@@ -281,6 +342,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -288,6 +352,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.5, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -295,22 +362,34 @@
"refId": "B"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"legendFormat": "25%",
"refId": "F"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"legendFormat": "5%",
"refId": "G"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
"legendFormat": "Average",
"refId": "H"
},
{
- "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))",
"hide": false,
"instant": false,
"legendFormat": "Events",
@@ -319,6 +398,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:283",
"colorMode": "warning",
"fill": false,
"line": true,
@@ -327,6 +407,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:284",
"colorMode": "critical",
"fill": false,
"line": true,
@@ -335,9 +416,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Event Send Time Quantiles (excluding errors, all workers)",
"tooltip": {
"shared": true,
@@ -346,34 +425,30 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
+ "$$hashKey": "object:255",
"format": "s",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:256",
"format": "hertz",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -381,10 +456,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -417,7 +493,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -427,6 +503,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
@@ -437,6 +516,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:566",
"colorMode": "critical",
"fill": true,
"line": true,
@@ -445,9 +525,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "CPU usage",
"tooltip": {
"shared": false,
@@ -456,34 +534,28 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
+ "$$hashKey": "object:538",
"format": "percentunit",
- "label": null,
"logBase": 1,
"max": "1.5",
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:539",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -491,12 +563,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -530,7 +603,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -540,6 +613,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
@@ -550,6 +626,9 @@
"target": ""
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
"hide": true,
"interval": "",
@@ -558,9 +637,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Memory",
"tooltip": {
"shared": false,
@@ -570,31 +647,27 @@
"transformations": [],
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:1560",
"format": "bytes",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:1561",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -602,10 +675,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -638,12 +712,13 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
+ "$$hashKey": "object:639",
"alias": "/max$/",
"color": "#890F02",
"fill": 0,
@@ -655,6 +730,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -665,6 +743,9 @@
"step": 20
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": true,
@@ -676,9 +757,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Open FDs",
"tooltip": {
"shared": false,
@@ -687,40 +766,35 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
+ "$$hashKey": "object:650",
"format": "none",
"label": "",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
- "decimals": null,
+ "$$hashKey": "object:651",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -734,12 +808,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -751,7 +826,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 25
+ "y": 27
},
"hiddenSeries": false,
"id": 5,
@@ -777,15 +852,17 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
+ "$$hashKey": "object:1240",
"alias": "/user/"
},
{
+ "$$hashKey": "object:1241",
"alias": "/system/"
}
],
@@ -794,6 +871,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
@@ -803,6 +883,9 @@
"step": 20
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"hide": false,
@@ -815,6 +898,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:1278",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
@@ -824,6 +908,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:1279",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
@@ -833,6 +918,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:1498",
"colorMode": "critical",
"fill": true,
"line": true,
@@ -841,9 +927,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "CPU",
"tooltip": {
"shared": false,
@@ -852,15 +936,13 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
+ "$$hashKey": "object:1250",
"format": "percentunit",
"label": "",
"logBase": 1,
@@ -869,71 +951,117 @@
"show": true
},
{
+ "$$hashKey": "object:1251",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan",
"fieldConfig": {
"defaults": {
- "custom": {},
- "links": []
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 25
+ "y": 27
},
- "hiddenSeries": false,
"id": 105,
"interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [],
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "paceLength": 10,
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
+ "pluginVersion": "8.3.2",
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.999, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{job}}-{{index}} 99.9%",
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
"expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
@@ -943,13 +1071,23 @@
"step": 20
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
"expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
+ "interval": "",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} 95%",
"refId": "B"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
@@ -957,6 +1095,10 @@
"refId": "C"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
@@ -964,58 +1106,21 @@
"refId": "D"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
"title": "Reactor tick quantiles",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ "type": "timeseries"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1027,7 +1132,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 32
+ "y": 34
},
"hiddenSeries": false,
"id": 34,
@@ -1049,7 +1154,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1059,6 +1164,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
@@ -1069,6 +1177,9 @@
"target": ""
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
"interval": "",
"legendFormat": "total",
@@ -1076,9 +1187,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Memory",
"tooltip": {
"shared": false,
@@ -1088,9 +1197,7 @@
"transformations": [],
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -1098,21 +1205,17 @@
{
"format": "bytes",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1120,10 +1223,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1134,7 +1238,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 32
+ "y": 34
},
"hiddenSeries": false,
"id": 49,
@@ -1156,7 +1260,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1172,6 +1276,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
@@ -1182,9 +1289,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Prometheus scrape time",
"tooltip": {
"shared": false,
@@ -1193,18 +1298,14 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "s",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
@@ -1219,8 +1320,7 @@
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1228,10 +1328,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1242,7 +1343,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 39
+ "y": 41
},
"hiddenSeries": false,
"id": 53,
@@ -1264,7 +1365,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1274,6 +1375,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
@@ -1282,9 +1386,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Up",
"tooltip": {
"shared": false,
@@ -1293,33 +1395,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1327,10 +1420,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1341,7 +1435,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 39
+ "y": 41
},
"hiddenSeries": false,
"id": 120,
@@ -1362,7 +1456,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1372,6 +1466,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"hide": false,
@@ -1381,7 +1478,10 @@
"refId": "A"
},
{
- "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"hide": false,
"instant": false,
@@ -1401,9 +1501,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Stacked CPU usage",
"tooltip": {
"shared": false,
@@ -1412,33 +1510,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:572",
"format": "percentunit",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:573",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1446,10 +1537,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1460,7 +1553,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 46
+ "y": 48
},
"hiddenSeries": false,
"id": 136,
@@ -1481,7 +1574,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1491,20 +1584,28 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_client_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_http_client_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"legendFormat": "{{job}}-{{index}} {{method}}",
+ "range": true,
"refId": "A"
},
{
- "expr": "rate(synapse_http_matrixfederationclient_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_http_matrixfederationclient_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"legendFormat": "{{job}}-{{index}} {{method}} (federation)",
+ "range": true,
"refId": "B"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Outgoing HTTP request rate",
"tooltip": {
"shared": false,
@@ -1513,43 +1614,139 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:123",
"format": "reqps",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:124",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "active threads",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 48
+ },
+ "id": 207,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "synapse_threadpool_working_threads{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "interval": "",
+ "legendFormat": "{{job}}-{{index}} {{name}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Threadpool activity",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
}
],
- "repeat": null,
"title": "Process info",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -1571,10 +1768,21 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -1582,7 +1790,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 21
+ "y": 56
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -1592,9 +1800,51 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellRadius": 0,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
"format": "heatmap",
"intervalFactor": 1,
@@ -1611,33 +1861,26 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
- "decimals": null,
"format": "s",
"logBase": 2,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -1649,7 +1892,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 21
+ "y": 56
},
"hiddenSeries": false,
"id": 33,
@@ -1671,7 +1914,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1681,7 +1924,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -1692,9 +1938,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events Persisted (all workers)",
"tooltip": {
"shared": true,
@@ -1703,31 +1947,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:102",
"format": "hertz",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:103",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1735,21 +1974,17 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
+ "decimals": 1,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 30
+ "y": 65
},
"hiddenSeries": false,
"id": 40,
@@ -1770,7 +2005,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1780,6 +2015,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
@@ -1788,9 +2026,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events/s Local vs Remote",
"tooltip": {
"shared": true,
@@ -1799,9 +2035,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -1810,22 +2044,17 @@
"format": "hertz",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1833,21 +2062,17 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
+ "decimals": 1,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 30
+ "y": 65
},
"hiddenSeries": false,
"id": 46,
@@ -1868,7 +2093,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1878,6 +2103,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"instant": false,
@@ -1888,9 +2116,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events/s by Type",
"tooltip": {
"shared": false,
@@ -1899,33 +2125,25 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -1935,21 +2153,17 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
+ "decimals": 1,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 37
+ "y": 72
},
"hiddenSeries": false,
"id": 44,
@@ -1973,7 +2187,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1983,6 +2197,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
@@ -1992,9 +2209,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events/s by Origin",
"tooltip": {
"shared": false,
@@ -2003,33 +2218,25 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2037,21 +2244,18 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": 1,
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
+ "decimals": 1,
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 37
+ "y": 72
},
"hiddenSeries": false,
"id": 45,
@@ -2075,7 +2279,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2085,18 +2289,21 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{origin_entity}} ({{origin_type}})",
+ "range": true,
"refId": "A",
"step": 20
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Memberships/s by Origin",
"tooltip": {
"shared": true,
@@ -2105,33 +2312,27 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:232",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:233",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2139,10 +2340,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2153,7 +2356,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 44
+ "y": 79
},
"hiddenSeries": false,
"id": 118,
@@ -2175,13 +2378,14 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeatDirection": "h",
"seriesOverrides": [
{
+ "$$hashKey": "object:316",
"alias": "mean",
"linewidth": 2
}
@@ -2191,6 +2395,11 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
"expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
"format": "time_series",
"interval": "",
@@ -2199,6 +2408,10 @@
"refId": "A"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
"format": "time_series",
"interval": "",
@@ -2207,6 +2420,10 @@
"refId": "B"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
"format": "time_series",
"intervalFactor": 1,
@@ -2214,6 +2431,10 @@
"refId": "C"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
"format": "time_series",
"intervalFactor": 1,
@@ -2221,17 +2442,21 @@
"refId": "D"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
"expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} mean",
+ "range": true,
"refId": "E"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Event send time quantiles by worker",
"tooltip": {
"shared": true,
@@ -2240,43 +2465,158 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:263",
"format": "s",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:264",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "description": "CPU and DB time spent on most expensive state resolution in a room, summed over all workers. This is a very rough proxy for \"how fast is state res\", but it doesn't accurately represent the system load (e.g. it completely ignores cheap state resolutions).\n",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 30,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s/s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 79
+ },
+ "id": 222,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "exemplar": false,
+ "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "DB time",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "exemplar": false,
+ "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "CPU time",
+ "refId": "C"
+ }
+ ],
+ "title": "Stateres worst-case",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
}
],
- "repeat": null,
"title": "Event persistence",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -2290,13 +2630,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": null,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2308,7 +2649,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 31
+ "y": 57
},
"hiddenSeries": false,
"id": 4,
@@ -2333,7 +2674,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2343,7 +2684,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -2354,6 +2698,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:234",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2362,6 +2707,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:235",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2370,9 +2716,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Request Count by arrival time",
"tooltip": {
"shared": false,
@@ -2381,31 +2725,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:206",
"format": "hertz",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:207",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2413,12 +2752,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2430,7 +2771,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 31
+ "y": 57
},
"hiddenSeries": false,
"id": 32,
@@ -2451,7 +2792,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2461,7 +2802,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}",
@@ -2471,9 +2815,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Top 10 Request Counts",
"tooltip": {
"shared": false,
@@ -2482,31 +2824,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:305",
"format": "hertz",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:306",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2514,13 +2851,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": null,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2532,7 +2870,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 39
+ "y": 65
},
"hiddenSeries": false,
"id": 139,
@@ -2557,7 +2895,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2567,7 +2905,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2578,6 +2919,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:135",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2586,6 +2928,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:136",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2594,9 +2937,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Total CPU Usage by Endpoint",
"tooltip": {
"shared": false,
@@ -2605,31 +2946,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:107",
"format": "percentunit",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:108",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2637,13 +2973,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": null,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2655,7 +2992,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 39
+ "y": 65
},
"hiddenSeries": false,
"id": 52,
@@ -2680,7 +3017,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2690,7 +3027,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2701,6 +3041,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:417",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2709,6 +3050,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:418",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2717,9 +3059,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average CPU Usage by Endpoint",
"tooltip": {
"shared": false,
@@ -2728,31 +3068,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:389",
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:390",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2760,12 +3095,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2777,7 +3114,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 47
+ "y": 73
},
"hiddenSeries": false,
"id": 7,
@@ -2801,7 +3138,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2811,7 +3148,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2821,9 +3161,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "DB Usage by endpoint",
"tooltip": {
"shared": false,
@@ -2832,31 +3170,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:488",
"format": "percentunit",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:489",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2864,13 +3197,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "decimals": null,
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2882,7 +3215,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 47
+ "y": 73
},
"hiddenSeries": false,
"id": 47,
@@ -2907,7 +3240,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2917,6 +3250,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))",
"format": "time_series",
"hide": false,
@@ -2928,9 +3264,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Non-sync avg response time",
"tooltip": {
"shared": false,
@@ -2939,9 +3273,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -2949,21 +3281,16 @@
{
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -2971,10 +3298,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -2985,7 +3313,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 55
+ "y": 81
},
"hiddenSeries": false,
"id": 103,
@@ -3006,7 +3334,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3023,6 +3351,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
"format": "time_series",
"interval": "",
@@ -3031,6 +3362,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"interval": "",
"legendFormat": "Total",
@@ -3038,9 +3372,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Requests in flight",
"tooltip": {
"shared": false,
@@ -3049,43 +3381,45 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Requests",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -3099,10 +3433,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3113,7 +3448,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 32
+ "y": 5
},
"hiddenSeries": false,
"id": 99,
@@ -3130,9 +3465,12 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "8.4.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3142,7 +3480,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3151,9 +3492,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "CPU usage by background jobs",
"tooltip": {
"shared": false,
@@ -3162,33 +3501,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percentunit",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3196,10 +3526,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3210,7 +3541,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 32
+ "y": 5
},
"hiddenSeries": false,
"id": 101,
@@ -3227,9 +3558,12 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "8.4.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3239,7 +3573,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3248,9 +3585,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "DB usage by background jobs (including scheduling time)",
"tooltip": {
"shared": false,
@@ -3259,33 +3594,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "percentunit",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3293,10 +3619,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3307,7 +3634,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 41
+ "y": 14
},
"hiddenSeries": false,
"id": 138,
@@ -3323,8 +3650,11 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "8.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3334,15 +3664,16 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_background_process_in_flight_count{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
"legendFormat": "{{job}}-{{index}} {{name}}",
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Background jobs in flight",
"tooltip": {
"shared": false,
@@ -3351,42 +3682,45 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Background jobs",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -3400,10 +3734,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3414,7 +3749,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 6
+ "y": 59
},
"hiddenSeries": false,
"id": 79,
@@ -3436,7 +3771,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3446,22 +3781,26 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "successful txn rate",
"refId": "A"
},
{
- "expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))",
"legendFormat": "failed txn rate",
"refId": "B"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Outgoing federation transaction rate",
"tooltip": {
"shared": true,
@@ -3470,33 +3809,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3504,10 +3834,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3518,7 +3849,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 6
+ "y": 59
},
"hiddenSeries": false,
"id": 83,
@@ -3540,7 +3871,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3550,14 +3881,20 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "pdus",
"refId": "A"
},
{
- "expr": "sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "edus",
@@ -3565,9 +3902,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Incoming PDU/EDU rate",
"tooltip": {
"shared": true,
@@ -3576,33 +3911,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3610,10 +3936,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3624,7 +3951,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 15
+ "y": 68
},
"hiddenSeries": false,
"id": 109,
@@ -3646,7 +3973,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3656,7 +3983,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3664,7 +3994,10 @@
"refId": "A"
},
{
- "expr": "sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "edus",
@@ -3672,9 +4005,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Outgoing PDU/EDU rate",
"tooltip": {
"shared": true,
@@ -3683,33 +4014,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3717,10 +4039,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3731,7 +4055,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 15
+ "y": 68
},
"hiddenSeries": false,
"id": 111,
@@ -3753,7 +4077,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3763,18 +4087,21 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_federation_client_sent_edus_by_type{instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_federation_client_sent_edus_by_type_total{instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{type}}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Outgoing EDUs by type",
"tooltip": {
"shared": true,
@@ -3783,33 +4110,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:462",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:463",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3817,11 +4137,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"description": "The number of events in the in-memory queues ",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3832,7 +4154,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 24
+ "y": 77
},
"hiddenSeries": false,
"id": 142,
@@ -3852,7 +4174,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3862,12 +4184,22 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
"expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "pending PDUs {{job}}-{{index}}",
+ "range": true,
"refId": "A"
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "pending EDUs {{job}}-{{index}}",
@@ -3875,9 +4207,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "In-memory federation transmission queues",
"tooltip": {
"shared": true,
@@ -3886,33 +4216,29 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:547",
"format": "short",
"label": "events",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:548",
"format": "short",
"label": "",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -3920,11 +4246,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Number of events queued up on the master process for processing by the federation sender",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -3935,7 +4262,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 24
+ "y": 77
},
"hiddenSeries": false,
"id": 140,
@@ -3957,7 +4284,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3967,6 +4294,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_presence_changed_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
@@ -3975,6 +4305,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_presence_map_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -3984,6 +4317,9 @@
"refId": "B"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_presence_destinations_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -3993,6 +4329,9 @@
"refId": "E"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_keyed_edu_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -4002,6 +4341,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_edus_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -4011,6 +4353,9 @@
"refId": "D"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_send_queue_pos_time_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -4021,9 +4366,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Outgoing EDU queues on master",
"tooltip": {
"shared": true,
@@ -4032,39 +4375,30 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "none",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": -1,
- "cardRound": null
+ "cardPadding": -1
},
"color": {
"cardColor": "#b4ff00",
@@ -4075,10 +4409,21 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -4086,7 +4431,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 32
+ "y": 85
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4096,9 +4441,54 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellValues": {
+ "decimals": 2
+ },
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "min": 0,
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)",
"format": "heatmap",
"instant": false,
@@ -4118,30 +4508,24 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4152,7 +4536,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 33
+ "y": 86
},
"hiddenSeries": false,
"id": 162,
@@ -4175,7 +4559,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4226,6 +4610,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.99, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"format": "time_series",
"interval": "",
@@ -4234,6 +4621,9 @@
"refId": "D"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"format": "time_series",
"interval": "",
@@ -4242,6 +4632,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"format": "time_series",
"interval": "",
@@ -4250,6 +4643,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.5, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"format": "time_series",
"interval": "",
@@ -4258,18 +4654,27 @@
"refId": "B"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"interval": "",
"legendFormat": "25%",
"refId": "F"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.05, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
"interval": "",
"legendFormat": "5%",
"refId": "G"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"interval": "",
"legendFormat": "Average",
@@ -4294,9 +4699,7 @@
"yaxis": "left"
}
],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Federation send PDU lag quantiles",
"tooltip": {
"shared": true,
@@ -4305,19 +4708,15 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "s",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
@@ -4325,20 +4724,17 @@
"format": "hertz",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": -1,
- "cardRound": null
+ "cardPadding": -1
},
"color": {
"cardColor": "#b4ff00",
@@ -4349,18 +4745,14 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 41
+ "y": 94
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4373,6 +4765,9 @@
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)",
"format": "heatmap",
"instant": false,
@@ -4392,32 +4787,26 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4429,7 +4818,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 42
+ "y": 95
},
"hiddenSeries": false,
"id": 203,
@@ -4451,7 +4840,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4461,6 +4850,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
@@ -4471,9 +4863,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Age of oldest event in staging area",
"tooltip": {
"msResolution": false,
@@ -4483,33 +4873,27 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:92",
"format": "ms",
- "label": null,
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
+ "$$hashKey": "object:93",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -4517,12 +4901,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4534,7 +4919,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 50
+ "y": 103
},
"hiddenSeries": false,
"id": 202,
@@ -4556,7 +4941,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4566,6 +4951,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
@@ -4576,9 +4964,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Number of events in federation staging area",
"tooltip": {
"msResolution": false,
@@ -4588,33 +4974,27 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:92",
"format": "none",
- "label": null,
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
+ "$$hashKey": "object:93",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -4622,12 +5002,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -4635,7 +5012,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 51
+ "y": 104
},
"hiddenSeries": false,
"id": 205,
@@ -4655,7 +5032,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -4665,6 +5042,10 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))",
"interval": "",
"legendFormat": "soft-failed events",
@@ -4672,9 +5053,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Soft-failed event rate",
"tooltip": {
"shared": true,
@@ -4683,48 +5062,776 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:131",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:132",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Federation",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 31
},
+ "id": 227,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 239,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_reject_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of rate limit rejected requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "id": 235,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_sleep_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of requests being slept by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Why is the data zero (0)? https://github.com/matrix-org/synapse/pull/13541#discussion_r951926322",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 237,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_reject_affected_hosts{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of hosts being rejected by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "https://github.com/matrix-org/synapse/pull/13541",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 233,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_sleep_affected_hosts{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of hosts being slept by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "hiddenSeries": false,
+ "id": 229,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "9.0.4",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "$$hashKey": "object:276",
+ "alias": "Avg",
+ "fill": 0,
+ "linewidth": 3
+ },
+ {
+ "$$hashKey": "object:277",
+ "alias": "99%",
+ "color": "#C4162A",
+ "fillBelowTo": "90%"
+ },
+ {
+ "$$hashKey": "object:278",
+ "alias": "90%",
+ "color": "#FF7383",
+ "fillBelowTo": "75%"
+ },
+ {
+ "$$hashKey": "object:279",
+ "alias": "75%",
+ "color": "#FFEE52",
+ "fillBelowTo": "50%"
+ },
+ {
+ "$$hashKey": "object:280",
+ "alias": "50%",
+ "color": "#73BF69",
+ "fillBelowTo": "25%"
+ },
+ {
+ "$$hashKey": "object:281",
+ "alias": "25%",
+ "color": "#1F60C4",
+ "fillBelowTo": "5%"
+ },
+ {
+ "$$hashKey": "object:282",
+ "alias": "5%",
+ "lines": false
+ },
+ {
+ "$$hashKey": "object:283",
+ "alias": "Average",
+ "color": "rgb(255, 255, 255)",
+ "lines": true,
+ "linewidth": 3
+ },
+ {
+ "$$hashKey": "object:284",
+ "alias": ">99%",
+ "color": "#B877D9",
+ "fill": 3,
+ "lines": true
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9995, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": ">99%",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "99%",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.9, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "90%",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.75, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "75%",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.5, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "50%",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.25, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "legendFormat": "25%",
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.05, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "legendFormat": "5%",
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_rate_limit_queue_wait_time_seconds_sum{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_rate_limit_queue_wait_time_seconds_count{index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "legendFormat": "Average",
+ "refId": "H"
+ }
+ ],
+ "thresholds": [
+ {
+ "$$hashKey": "object:283",
+ "colorMode": "warning",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 1,
+ "yaxis": "left"
+ },
+ {
+ "$$hashKey": "object:284",
+ "colorMode": "critical",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ }
+ ],
+ "timeRegions": [],
+ "title": "Rate limit queue wait time Quantiles (all workers)",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:255",
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:256",
+ "format": "hertz",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "hertz"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Default reject threshold (50 requests within a second)"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 2
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 231,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(rate(synapse_rate_limit_sleep_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "50",
+ "hide": false,
+ "legendFormat": "Default reject threshold (50 requests within a second)",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Rate of requests being slept by the rate limiter",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Federation rate limiter",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 32
+ },
"id": 60,
"panels": [
{
@@ -4732,10 +5839,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4746,7 +5854,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 34
+ "y": 32
},
"hiddenSeries": false,
"id": 51,
@@ -4763,9 +5871,12 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "8.4.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4775,7 +5886,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -4784,7 +5898,10 @@
"step": 20
},
{
- "expr": "rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) > 0",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "failed {{job}}",
@@ -4793,9 +5910,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "HTTP Push rate",
"tooltip": {
"shared": true,
@@ -4804,33 +5919,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -4838,11 +5944,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4853,7 +5960,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 34
+ "y": 32
},
"hiddenSeries": false,
"id": 134,
@@ -4870,8 +5977,11 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "8.4.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -4881,15 +5991,16 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})",
"legendFormat": "{{kind}} {{app_id}}",
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Active pusher instances by app",
"tooltip": {
"shared": false,
@@ -4898,60 +6009,65 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Pushes",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 32
+ "y": 33
},
- "id": 58,
+ "id": 219,
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many entries in current state that we are iterating over while calculating push rules.",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -4965,13 +6081,114 @@
"y": 33
},
"hiddenSeries": false,
- "id": 48,
+ "id": 209,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "8.4.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{index}}",
+ "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Iterations over State",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
"show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Rate that the cached push rules for a room get invalidated due to underlying push rules being changed. ",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "hiddenSeries": false,
+ "id": 211,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
"total": false,
"values": false
},
@@ -4984,7 +6201,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "8.4.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4994,19 +6211,24 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
+ "interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
+ "legendFormat": "{{index}}",
+ "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
"refId": "A",
- "step": 20
+ "step": 2
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
- "title": "Avg time waiting for db conn",
+ "title": "Push Rule Invalidations",
"tooltip": {
"shared": true,
"sort": 0,
@@ -5014,34 +6236,149 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
- "format": "s",
+ "format": "hertz",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
- "show": false
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How often the \"delta optimisation\" happens.\n\nThe delta optimisation is when we update the push rules for a room incrementally after a state change where we know the delta between the old state and the new state.\n\nThis can't happen if we don't the delta or we're calculating push rules from scratch.",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 40
+ },
+ "hiddenSeries": false,
+ "id": 213,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "8.4.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Number of calls",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Hit Rate",
+ "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"push_rules_delta_state_cache_metric\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Number of calls",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Delta Optimisation",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "hertz",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5049,11 +6386,374 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How often we have the correct cached push rules for a room.",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 40
+ },
+ "hiddenSeries": false,
+ "id": 215,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "8.4.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Number of calls",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Hit Rate",
+ "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"room_push_rule_cache\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Number of calls",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "How often we reuse existing calculated push rules",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How often we have existing cached push rules for the room. \n\nNote that these might be outdated and need to be recalculated if the state has changed.",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 47
+ },
+ "hiddenSeries": false,
+ "id": 217,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "8.4.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Number of calls",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache_hits{job=\"$job\",index=~\"$index\",name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))/sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Hit Rate",
+ "metric": "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total",
+ "refId": "A",
+ "step": 2
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(synapse_util_caches_cache{job=\"$job\",index=~\"$index\", name=\"_get_rules_for_room\",instance=\"$instance\"}[$bucket_size]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "Number of calls",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "How often we have the RulesForRoom cached",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "hertz",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ }
+ ],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Push Rule Cache",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 34
+ },
+ "id": 58,
+ "panels": [
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 35
+ },
+ "id": 48,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "title": "Avg time waiting for db conn",
+ "type": "timeseries"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5064,7 +6764,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 33
+ "y": 35
},
"hiddenSeries": false,
"id": 104,
@@ -5087,7 +6787,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5097,6 +6797,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"hide": false,
@@ -5106,6 +6809,9 @@
"step": 20
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
@@ -5113,6 +6819,9 @@
"refId": "B"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"intervalFactor": 1,
@@ -5120,6 +6829,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
@@ -5129,9 +6841,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Db scheduling time quantiles",
"tooltip": {
"shared": false,
@@ -5140,34 +6850,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "s",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5175,12 +6877,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5192,7 +6895,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 42
},
"hiddenSeries": false,
"id": 10,
@@ -5216,7 +6919,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5226,7 +6929,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -5236,9 +6942,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Top DB transactions by txn rate",
"tooltip": {
"shared": false,
@@ -5247,9 +6951,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -5257,21 +6959,17 @@
{
"format": "hertz",
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5279,12 +6977,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5296,7 +6995,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 42
},
"hiddenSeries": false,
"id": 11,
@@ -5320,7 +7019,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5330,7 +7029,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"instant": false,
"interval": "",
@@ -5341,9 +7043,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "DB transactions by total txn time",
"tooltip": {
"shared": false,
@@ -5352,9 +7052,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -5362,21 +7060,16 @@
{
"format": "percentunit",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5384,12 +7077,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5401,7 +7095,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 47
+ "y": 49
},
"hiddenSeries": false,
"id": 180,
@@ -5425,7 +7119,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5435,7 +7129,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"instant": false,
"interval": "",
@@ -5446,9 +7143,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average DB txn time",
"tooltip": {
"shared": false,
@@ -5457,9 +7152,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -5467,21 +7160,16 @@
{
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5489,10 +7177,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5503,7 +7192,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 47
+ "y": 49
},
"hiddenSeries": false,
"id": 200,
@@ -5524,7 +7213,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5534,6 +7223,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -5541,6 +7233,9 @@
"refId": "D"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -5548,6 +7243,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -5555,6 +7253,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
@@ -5563,9 +7264,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Time waiting for DB connection quantiles",
"tooltip": {
"shared": true,
@@ -5574,49 +7273,54 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
+ "$$hashKey": "object:203",
"format": "s",
"label": "",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:204",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Database",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 33
+ "y": 35
},
"id": 59,
"panels": [
@@ -5625,12 +7329,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5642,7 +7347,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 9
+ "y": 35
},
"hiddenSeries": false,
"id": 12,
@@ -5660,9 +7365,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5672,7 +7380,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -5682,9 +7393,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Total CPU Usage by Block",
"tooltip": {
"shared": true,
@@ -5693,9 +7402,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -5703,21 +7410,16 @@
{
"format": "percentunit",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5725,12 +7427,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5742,7 +7445,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 9
+ "y": 35
},
"hiddenSeries": false,
"id": 26,
@@ -5760,9 +7463,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5772,7 +7478,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -5782,9 +7491,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average CPU Time per Block",
"tooltip": {
"shared": true,
@@ -5793,9 +7500,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -5803,21 +7508,16 @@
{
"format": "ms",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5825,12 +7525,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5842,7 +7544,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 22
+ "y": 48
},
"hiddenSeries": false,
"id": 13,
@@ -5860,9 +7562,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5872,19 +7577,22 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}} {{block_name}}",
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
"refId": "A",
"step": 20
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Total DB Usage by Block",
"tooltip": {
"shared": true,
@@ -5893,31 +7601,27 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:196",
"format": "percentunit",
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
+ "$$hashKey": "object:197",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -5925,13 +7629,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "The time each database transaction takes to execute, on average, broken down by metrics block.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -5943,7 +7648,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 22
+ "y": 48
},
"hiddenSeries": false,
"id": 27,
@@ -5961,9 +7666,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5973,7 +7681,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -5983,9 +7694,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average Database Transaction time, by Block",
"tooltip": {
"shared": true,
@@ -5994,9 +7703,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -6004,21 +7711,16 @@
{
"format": "ms",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6026,12 +7728,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6043,7 +7746,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 35
+ "y": 61
},
"hiddenSeries": false,
"id": 28,
@@ -6060,9 +7763,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6072,7 +7778,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -6082,9 +7791,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average Transactions per Block",
"tooltip": {
"shared": false,
@@ -6093,9 +7800,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -6103,21 +7808,16 @@
{
"format": "none",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6125,12 +7825,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6142,7 +7843,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 35
+ "y": 61
},
"hiddenSeries": false,
"id": 25,
@@ -6159,9 +7860,12 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6171,7 +7875,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -6181,9 +7888,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average Wallclock Time per Block",
"tooltip": {
"shared": false,
@@ -6192,31 +7897,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "format": "ms",
+ "$$hashKey": "object:180",
+ "format": "s",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:181",
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6224,12 +7924,8 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -6237,7 +7933,7 @@
"h": 15,
"w": 12,
"x": 0,
- "y": 48
+ "y": 74
},
"hiddenSeries": false,
"id": 154,
@@ -6254,8 +7950,11 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -6265,16 +7964,17 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{job}}-{{index}} {{block_name}}",
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Block count",
"tooltip": {
"shared": true,
@@ -6283,48 +7983,50 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Per-block metrics",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 34
+ "y": 36
},
"id": 61,
"panels": [
@@ -6333,13 +8035,14 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"decimals": 2,
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6351,7 +8054,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 35
+ "y": 36
},
"hiddenSeries": false,
"id": 1,
@@ -6375,7 +8078,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6385,7 +8088,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{name}} {{job}}-{{index}}",
@@ -6394,9 +8100,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Cache Hit Ratio",
"tooltip": {
"msResolution": true,
@@ -6406,15 +8110,12 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "percentunit",
"label": "",
"logBase": 1,
@@ -6425,14 +8126,11 @@
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6440,12 +8138,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6457,7 +8156,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 35
+ "y": 36
},
"hiddenSeries": false,
"id": 8,
@@ -6480,7 +8179,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6490,7 +8189,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
"interval": "",
@@ -6501,9 +8203,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Cache Size",
"tooltip": {
"shared": false,
@@ -6512,9 +8212,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -6522,21 +8220,17 @@
{
"format": "short",
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6544,12 +8238,13 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6561,7 +8256,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 45
+ "y": 46
},
"hiddenSeries": false,
"id": 38,
@@ -6584,7 +8279,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6594,7 +8289,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -6604,9 +8302,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Cache request rate",
"tooltip": {
"shared": false,
@@ -6615,9 +8311,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -6625,21 +8319,17 @@
{
"format": "rps",
"logBase": 1,
- "max": null,
"min": 0,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6647,10 +8337,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6661,7 +8352,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 45
+ "y": 46
},
"hiddenSeries": false,
"id": 39,
@@ -6669,11 +8360,16 @@
"alignAsTable": true,
"avg": false,
"current": false,
- "max": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
"min": false,
+ "rightSide": false,
"show": true,
+ "sort": "max",
+ "sortDesc": true,
"total": false,
- "values": false
+ "values": true
},
"lines": true,
"linewidth": 1,
@@ -6683,7 +8379,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6693,7 +8389,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "topk(10, rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -6703,9 +8402,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Top 10 cache misses",
"tooltip": {
"shared": false,
@@ -6714,33 +8411,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:101",
"format": "rps",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:102",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6748,10 +8438,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -6762,7 +8453,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 55
+ "y": 56
},
"hiddenSeries": false,
"id": 65,
@@ -6784,7 +8475,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6794,17 +8485,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
+ "interval": "",
"intervalFactor": 1,
"legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}",
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Cache eviction rate",
"tooltip": {
"shared": false,
@@ -6813,49 +8506,51 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "hertz",
"label": "entries / second",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Caches",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 35
+ "y": 37
},
"id": 148,
"panels": [
@@ -6864,7 +8559,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -6908,16 +8605,17 @@
"steppedLine": false,
"targets": [
{
- "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "{{name}} {{job}}-{{index}}",
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Response cache size",
"tooltip": {
"shared": false,
@@ -6926,33 +8624,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -6960,7 +8649,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -7004,12 +8695,18 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{name}} {{job}}-{{index}}",
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "",
"interval": "",
"legendFormat": "",
@@ -7017,9 +8714,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Response cache hit rate",
"tooltip": {
"shared": false,
@@ -7028,17 +8723,13 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "percentunit",
- "label": null,
"logBase": 1,
"max": "1",
"min": "0",
@@ -7046,30 +8737,38 @@
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Response caches",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 36
+ "y": 38
},
"id": 62,
"panels": [
@@ -7078,7 +8777,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -7123,6 +8824,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])",
"format": "time_series",
"instant": false,
@@ -7132,9 +8836,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Total GC time by bucket (10m smoothing)",
"tooltip": {
"shared": true,
@@ -7143,34 +8845,25 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "percentunit",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -7178,7 +8871,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"decimals": 3,
"editable": true,
"error": false,
@@ -7228,6 +8923,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
@@ -7238,9 +8936,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Average GC Time Per Collection",
"tooltip": {
"shared": false,
@@ -7249,9 +8945,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -7259,21 +8953,16 @@
{
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -7281,7 +8970,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.",
"fieldConfig": {
"defaults": {
@@ -7334,6 +9025,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 1,
@@ -7342,9 +9036,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Allocation counts",
"tooltip": {
"shared": false,
@@ -7353,9 +9045,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -7364,23 +9054,17 @@
"format": "short",
"label": "Gen N-1 GCs since last Gen N GC",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
- "decimals": null,
"format": "short",
"label": "Objects since last Gen 0 GC",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -7388,7 +9072,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -7433,6 +9119,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
@@ -7441,9 +9130,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Object counts per collection",
"tooltip": {
"shared": true,
@@ -7452,33 +9139,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -7486,7 +9164,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -7531,6 +9211,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
@@ -7539,9 +9222,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "GC frequency",
"tooltip": {
"shared": true,
@@ -7550,51 +9231,43 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": 0,
- "cardRound": null
+ "cardPadding": 0
},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateSpectral",
"exponent": 0.5,
- "max": null,
"min": 0,
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"fieldConfig": {
"defaults": {
"custom": {}
@@ -7618,6 +9291,10 @@
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
"expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)",
"format": "heatmap",
"intervalFactor": 1,
@@ -7634,34 +9311,37 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
- "decimals": null,
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
+ },
+ "yBucketBound": "auto"
+ }
+ ],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "refId": "A"
}
],
- "repeat": null,
"title": "GC",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 37
+ "y": 39
},
"id": 63,
"panels": [
@@ -7670,10 +9350,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -7684,10 +9366,10 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 13
+ "y": 40
},
"hiddenSeries": false,
- "id": 42,
+ "id": 43,
"legend": {
"avg": false,
"current": false,
@@ -7706,7 +9388,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7716,7 +9398,10 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}} {{command}}",
@@ -7725,10 +9410,8 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
- "title": "Rate of incoming commands",
+ "title": "Rate of outgoing commands",
"tooltip": {
"shared": false,
"sort": 0,
@@ -7736,241 +9419,331 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:89",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:90",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "description": "",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
- "links": []
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "hertz"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 13
- },
- "hiddenSeries": false,
- "id": 144,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "y": 40
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
+ "id": 41,
+ "links": [],
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
+ "pluginVersion": "8.4.3",
"targets": [
{
- "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "rate(synapse_replication_tcp_resource_stream_updates_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
"interval": "",
- "legendFormat": "{{stream_name}} {{job}}-{{index}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Queued incoming RDATA commands, by stream",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "intervalFactor": 2,
+ "legendFormat": "{{stream_name}}",
+ "refId": "A",
+ "step": 20
}
],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ "title": "Rate of outgoing RDATA commands, by stream",
+ "type": "timeseries"
},
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
- "links": []
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "hertz"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 20
- },
- "hiddenSeries": false,
- "id": 43,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "y": 47
},
- "lines": true,
- "linewidth": 1,
+ "id": 42,
"links": [],
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "paceLength": 10,
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
+ "pluginVersion": "8.4.3",
"targets": [
{
- "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
+ "interval": "",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}} {{command}}",
"refId": "A",
"step": 20
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Rate of outgoing commands",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
+ "title": "Rate of incoming commands (including echoes)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMin": 1,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "hertz"
+ },
+ "overrides": []
},
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 47
+ },
+ "id": 220,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "8.4.3",
+ "targets": [
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "rate(synapse_replication_tcp_protocol_inbound_rdata_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{stream_name}}",
+ "refId": "A",
+ "step": 20
}
],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ "title": "Rate of incoming RDATA commands (excluding echoes), by stream",
+ "type": "timeseries"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -7980,11 +9753,11 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 20
+ "x": 0,
+ "y": 54
},
"hiddenSeries": false,
- "id": 41,
+ "id": 144,
"legend": {
"avg": false,
"current": false,
@@ -7996,15 +9769,13 @@
},
"lines": true,
"linewidth": 1,
- "links": [],
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
- "paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 5,
+ "pluginVersion": "9.0.4",
+ "pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
@@ -8013,20 +9784,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{stream_name}}",
- "refId": "A",
- "step": 20
+ "legendFormat": "{{stream_name}} {{job}}-{{index}}",
+ "refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
- "title": "Outgoing stream updates",
+ "title": "Queued incoming RDATA commands, by stream",
"tooltip": {
"shared": false,
"sort": 0,
@@ -8034,33 +9804,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "format": "hertz",
- "label": null,
+ "$$hashKey": "object:218",
+ "format": "short",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:219",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -8068,10 +9831,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -8081,11 +9846,11 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 0,
- "y": 27
+ "x": 12,
+ "y": 54
},
"hiddenSeries": false,
- "id": 113,
+ "id": 115,
"legend": {
"avg": false,
"current": false,
@@ -8104,7 +9869,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -8114,25 +9879,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_replication_tcp_protocol_close_reason_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{stream_name}}",
+ "legendFormat": "{{job}}-{{index}} {{reason_type}}",
"refId": "A"
- },
- {
- "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "B"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
- "title": "Replication connections",
+ "title": "Replication connection close reasons",
"tooltip": {
"shared": true,
"sort": 0,
@@ -8140,33 +9899,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "format": "short",
- "label": null,
+ "$$hashKey": "object:260",
+ "format": "hertz",
"logBase": 1,
- "max": null,
- "min": "0",
"show": true
},
{
+ "$$hashKey": "object:261",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -8174,10 +9926,11 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -8187,11 +9940,11 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 27
+ "x": 0,
+ "y": 61
},
"hiddenSeries": false,
- "id": 115,
+ "id": 113,
"legend": {
"avg": false,
"current": false,
@@ -8210,7 +9963,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -8220,18 +9973,29 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{reason_type}}",
+ "legendFormat": "{{job}}-{{index}} {{stream_name}}",
"refId": "A"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "B"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
- "title": "Replication connection close reasons",
+ "title": "Replication connections",
"tooltip": {
"shared": true,
"sort": 0,
@@ -8239,48 +10003,51 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "format": "hertz",
- "label": null,
+ "format": "short",
"logBase": 1,
- "max": null,
- "min": null,
+ "min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
- "repeat": null,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Replication",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 38
+ "y": 40
},
"id": 69,
"panels": [
@@ -8289,7 +10056,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -8335,6 +10104,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
@@ -8344,9 +10116,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Event processing lag",
"tooltip": {
"shared": true,
@@ -8355,9 +10125,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -8366,22 +10134,17 @@
"format": "short",
"label": "events",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -8389,7 +10152,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -8435,6 +10200,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"hide": false,
@@ -8445,9 +10213,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Age of last processed event",
"tooltip": {
"shared": true,
@@ -8456,33 +10222,25 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "ms",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -8490,7 +10248,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
"custom": {},
@@ -8537,6 +10297,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1",
"format": "time_series",
"hide": false,
@@ -8547,9 +10310,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Event processing catchup rate",
"tooltip": {
"shared": true,
@@ -8558,67 +10319,70 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "none",
"label": "fallbehind(-) / catchup(+): s/sec",
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Event processing loop positions",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 39
+ "y": 41
},
"id": 126,
"panels": [
{
"cards": {
- "cardPadding": 0,
- "cardRound": null
+ "cardPadding": 0
},
"color": {
"cardColor": "#B877D9",
"colorScale": "sqrt",
"colorScheme": "interpolateInferno",
"exponent": 0.5,
- "max": null,
"min": 0,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.",
"fieldConfig": {
"defaults": {
@@ -8643,15 +10407,16 @@
"reverseYBuckets": false,
"targets": [
{
- "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
"format": "heatmap",
"intervalFactor": 1,
"legendFormat": "{{le}}",
"refId": "A"
}
],
- "timeFrom": null,
- "timeShift": null,
"title": "Number of rooms, by number of forward extremities in room",
"tooltip": {
"show": true,
@@ -8661,27 +10426,22 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.",
"fieldConfig": {
"defaults": {
@@ -8725,6 +10485,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0",
"format": "heatmap",
"interval": "",
@@ -8734,9 +10497,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Room counts, by number of extremities",
"tooltip": {
"shared": true,
@@ -8745,40 +10506,30 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
- "decimals": null,
"format": "none",
"label": "Number of rooms",
"logBase": 10,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": false
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": 0,
- "cardRound": null
+ "cardPadding": 0
},
"color": {
"cardColor": "#5794F2",
@@ -8789,7 +10540,9 @@
"mode": "opacity"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.",
"fieldConfig": {
"defaults": {
@@ -8814,15 +10567,16 @@
"reverseYBuckets": false,
"targets": [
{
- "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
"format": "heatmap",
"intervalFactor": 1,
"legendFormat": "{{le}}",
"refId": "A"
}
],
- "timeFrom": null,
- "timeShift": null,
"title": "Events persisted, by number of forward extremities in room (heatmap)",
"tooltip": {
"show": true,
@@ -8832,27 +10586,22 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.",
"fieldConfig": {
"defaults": {
@@ -8895,28 +10644,40 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "50%",
"refId": "A"
},
{
- "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "75%",
"refId": "B"
},
{
- "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "90%",
"refId": "C"
},
{
- "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "99%",
@@ -8924,9 +10685,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events persisted, by number of forward extremities in room (quantiles)",
"tooltip": {
"shared": true,
@@ -8935,9 +10694,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -8946,28 +10703,22 @@
"format": "short",
"label": "Number of extremities in room",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": 0,
- "cardRound": null
+ "cardPadding": 0
},
"color": {
"cardColor": "#FF9830",
@@ -8978,7 +10729,9 @@
"mode": "opacity"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
"fieldConfig": {
"defaults": {
@@ -9003,15 +10756,16 @@
"reverseYBuckets": false,
"targets": [
{
- "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)",
"format": "heatmap",
"intervalFactor": 1,
"legendFormat": "{{le}}",
"refId": "A"
}
],
- "timeFrom": null,
- "timeShift": null,
"title": "Events persisted, by number of stale forward extremities in room (heatmap)",
"tooltip": {
"show": true,
@@ -9021,27 +10775,22 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
"fieldConfig": {
"defaults": {
@@ -9084,28 +10833,40 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "50%",
"refId": "A"
},
{
- "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "75%",
"refId": "B"
},
{
- "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "90%",
"refId": "C"
},
{
- "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "99%",
@@ -9113,9 +10874,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Events persisted, by number of stale forward extremities in room (quantiles)",
"tooltip": {
"shared": true,
@@ -9124,9 +10883,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -9135,28 +10892,22 @@
"format": "short",
"label": "Number of stale forward extremities in room",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": 0,
- "cardRound": null
+ "cardPadding": 0
},
"color": {
"cardColor": "#73BF69",
@@ -9167,7 +10918,9 @@
"mode": "opacity"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.",
"fieldConfig": {
"defaults": {
@@ -9192,6 +10945,9 @@
"reverseYBuckets": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "heatmap",
"interval": "",
@@ -9200,8 +10956,6 @@
"refId": "A"
}
],
- "timeFrom": null,
- "timeShift": null,
"title": "Number of state resolution performed, by number of state groups involved (heatmap)",
"tooltip": {
"show": true,
@@ -9211,27 +10965,22 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "short",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "yBucketBound": "auto"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.",
"fieldConfig": {
"defaults": {
@@ -9275,6 +11024,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
@@ -9283,6 +11035,9 @@
"refId": "A"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
@@ -9291,6 +11046,9 @@
"refId": "B"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
@@ -9299,6 +11057,9 @@
"refId": "C"
},
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
@@ -9308,9 +11069,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Number of state resolutions performed, by number of state groups involved (quantiles)",
"tooltip": {
"shared": true,
@@ -9319,9 +11078,7 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
@@ -9330,22 +11087,17 @@
"format": "short",
"label": "Number of state groups",
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -9353,7 +11105,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.",
"fieldConfig": {
"defaults": {
@@ -9394,28 +11148,35 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"interval": "",
"legendFormat": "State res ",
"refId": "A"
},
{
- "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"interval": "",
"legendFormat": "Potential to prune",
"refId": "B"
},
{
- "expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"interval": "",
"legendFormat": "Pruned",
"refId": "C"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Stale extremity dropping",
"tooltip": {
"shared": true,
@@ -9424,47 +11185,50 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Extremities",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 40
+ "y": 42
},
"id": 158,
"panels": [
@@ -9473,10 +11237,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -9487,7 +11253,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 41
+ "y": 43
},
"hiddenSeries": false,
"id": 156,
@@ -9508,12 +11274,13 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
+ "$$hashKey": "object:632",
"alias": "Max",
"color": "#bf1b00",
"fill": 0,
@@ -9525,26 +11292,33 @@
"steppedLine": false,
"targets": [
{
- "expr": "synapse_admin_mau:current{instance=\"$instance\", job=~\"$job\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "Current",
- "refId": "A"
- },
- {
- "expr": "synapse_admin_mau:max{instance=\"$instance\", job=~\"$job\"}",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "synapse_admin_mau_max{instance=\"$instance\", job=~\"(hhs_)?synapse\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "Max",
+ "range": true,
"refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "synapse_admin_mau_current{instance=\"$instance\", job=~\"(hhs_)?synapse\"}",
+ "hide": false,
+ "legendFormat": "Current",
+ "range": true,
+ "refId": "C"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "MAU Limits",
"tooltip": {
"shared": true,
@@ -9553,33 +11327,27 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:176",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:177",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -9587,12 +11355,8 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -9600,7 +11364,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 41
+ "y": 43
},
"hiddenSeries": false,
"id": 160,
@@ -9620,7 +11384,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -9630,6 +11394,9 @@
"steppedLine": false,
"targets": [
{
+ "datasource": {
+ "uid": "$datasource"
+ },
"expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}",
"interval": "",
"legendFormat": "{{ app_service }}",
@@ -9637,9 +11404,7 @@
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "MAU by Appservice",
"tooltip": {
"shared": true,
@@ -9648,47 +11413,50 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "MAU",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 41
+ "y": 43
},
"id": 177,
"panels": [
@@ -9697,10 +11465,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -9711,7 +11481,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 1
+ "y": 44
},
"hiddenSeries": false,
"id": 173,
@@ -9728,8 +11498,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9739,20 +11512,23 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_notifier_users_woken_by_stream{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_notifier_users_woken_by_stream_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
"legendFormat": "{{stream}} {{index}}",
"metric": "synapse_notifier",
+ "range": true,
"refId": "A",
"step": 2
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Notifier Streams Woken",
"tooltip": {
"shared": true,
@@ -9761,33 +11537,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:734",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:735",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -9795,10 +11564,12 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -9809,7 +11580,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 1
+ "y": 44
},
"hiddenSeries": false,
"id": 175,
@@ -9826,8 +11597,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9837,19 +11611,22 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_handler_presence_get_updates{job=~\"$job\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_get_updates_total{job=~\"$job\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{type}} {{index}}",
+ "range": true,
"refId": "A",
"step": 2
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Presence Stream Fetch Type Rates",
"tooltip": {
"shared": true,
@@ -9858,47 +11635,53 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:819",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:820",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Notifier",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 42
+ "y": 44
},
"id": 170,
"panels": [
@@ -9907,12 +11690,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -9920,7 +11700,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 43
+ "y": 45
},
"hiddenSeries": false,
"id": 168,
@@ -9940,7 +11720,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -9950,16 +11730,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_appservice_api_sent_events_total{instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{service}}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Sent Events rate",
"tooltip": {
"shared": true,
@@ -9968,33 +11751,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:177",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:178",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -10002,12 +11778,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -10015,7 +11788,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 43
+ "y": 45
},
"hiddenSeries": false,
"id": 171,
@@ -10035,7 +11808,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10045,16 +11818,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_appservice_api_sent_transactions_total{instance=\"$instance\"}[$bucket_size])",
"interval": "",
- "legendFormat": "{{service}}",
+ "legendFormat": "{{exported_service }} {{ service }}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Transactions rate",
"tooltip": {
"shared": true,
@@ -10063,47 +11839,52 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:260",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:261",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Appservices",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 43
+ "y": 45
},
"id": 188,
"panels": [
@@ -10112,12 +11893,8 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -10125,7 +11902,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 44
+ "y": 46
},
"hiddenSeries": false,
"id": 182,
@@ -10145,7 +11922,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10155,40 +11932,53 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "Notified",
"refId": "A"
},
{
- "expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "Remote ping",
"refId": "B"
},
{
- "expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "Total updates",
"refId": "C"
},
{
- "expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "Remote updates",
"refId": "D"
},
{
- "expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "Bump active time",
"refId": "E"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Presence",
"tooltip": {
"shared": true,
@@ -10197,33 +11987,24 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -10231,12 +12012,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -10244,7 +12022,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 44
+ "y": 46
},
"hiddenSeries": false,
"id": 184,
@@ -10264,7 +12042,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10274,16 +12052,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_handler_presence_state_transition{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_state_transition_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{from}} -> {{to}}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Presence state transitions",
"tooltip": {
"shared": true,
@@ -10292,33 +12073,26 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:1090",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:1091",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
@@ -10326,12 +12100,9 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"fill": 1,
"fillGradient": 0,
@@ -10339,7 +12110,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 52
+ "y": 54
},
"hiddenSeries": false,
"id": 186,
@@ -10359,7 +12130,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10369,16 +12140,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_handler_presence_notify_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_notify_reason_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{reason}}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "Presence notify reason",
"tooltip": {
"shared": true,
@@ -10387,165 +12161,163 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:165",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:166",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
}
],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Presence",
"type": "row"
},
{
"collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 44
+ "y": 46
},
"id": 197,
"panels": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
"fieldConfig": {
"defaults": {
- "custom": {}
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "hertz"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 1
+ "y": 46
},
- "hiddenSeries": false,
"id": 191,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- "percentage": false,
- "pluginVersion": "7.3.7",
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
+ "pluginVersion": "9.0.4",
"targets": [
{
- "expr": "rate(synapse_external_cache_set{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_external_cache_set{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])",
"interval": "",
- "legendFormat": "{{ cache_name }} {{ index }}",
+ "legendFormat": "{{ cache_name }} {{job}}-{{ index }}",
+ "range": true,
"refId": "A"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
"title": "External Cache Set Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ "type": "timeseries"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
- "description": "",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
+ "description": "",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 1
+ "y": 46
},
"hiddenSeries": false,
"id": 193,
@@ -10565,7 +12337,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10575,16 +12347,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_external_cache_get{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "sum without (hit) (rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size]))",
"interval": "",
- "legendFormat": "{{ cache_name }} {{ index }}",
+ "legendFormat": "{{ cache_name }} {{job}}-{{ index }}",
+ "range": true,
"refId": "A"
}
],
"thresholds": [],
- "timeFrom": null,
"timeRegions": [],
- "timeShift": null,
"title": "External Cache Get Rate",
"tooltip": {
"shared": true,
@@ -10593,39 +12368,31 @@
},
"type": "graph",
"xaxis": {
- "buckets": null,
"mode": "time",
- "name": null,
"show": true,
"values": []
},
"yaxes": [
{
+ "$$hashKey": "object:390",
"format": "hertz",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
},
{
+ "$$hashKey": "object:391",
"format": "short",
- "label": null,
"logBase": 1,
- "max": null,
- "min": null,
"show": true
}
],
"yaxis": {
- "align": false,
- "alignLevel": null
+ "align": false
}
},
{
"cards": {
- "cardPadding": -1,
- "cardRound": null
+ "cardPadding": -1
},
"color": {
"cardColor": "#b4ff00",
@@ -10636,18 +12403,15 @@
"mode": "spectrum"
},
"dataFormat": "tsbuckets",
- "datasource": "$datasource",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
},
"gridPos": {
- "h": 9,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 9
+ "y": 54
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -10660,7 +12424,10 @@
"reverseYBuckets": false,
"targets": [
{
- "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le)",
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])) by (le)",
"format": "heatmap",
"instant": false,
"interval": "",
@@ -10679,20 +12446,110 @@
"xAxis": {
"show": true
},
- "xBucketNumber": null,
- "xBucketSize": null,
"yAxis": {
"decimals": 0,
"format": "s",
"logBase": 1,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
+ "show": true
+ },
+ "yBucketBound": "auto"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "hertz"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 54
},
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
+ "id": 223,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\", hit=\"False\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{ cache_name }} {{job}}-{{ index }}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "External Cache Miss Rate",
+ "type": "timeseries"
+ }
+ ],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "000000001"
+ },
+ "refId": "A"
}
],
"title": "External Cache",
@@ -10700,7 +12557,7 @@
}
],
"refresh": false,
- "schemaVersion": 26,
+ "schemaVersion": 37,
"style": "dark",
"tags": [
"matrix"
@@ -10713,10 +12570,8 @@
"text": "default",
"value": "default"
},
- "error": null,
"hide": 0,
"includeAll": false,
- "label": null,
"multi": false,
"name": "datasource",
"options": [],
@@ -10731,14 +12586,12 @@
"allFormat": "glob",
"auto": true,
"auto_count": 100,
- "auto_min": "60s",
+ "auto_min": "30s",
"current": {
"selected": false,
"text": "auto",
"value": "$__auto_interval_bucket_size"
},
- "datasource": null,
- "error": null,
"hide": 0,
"includeAll": false,
"label": "Bucket Size",
@@ -10789,24 +12642,25 @@
"type": "interval"
},
{
- "allValue": null,
"current": {},
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"definition": "",
- "error": null,
"hide": 0,
"includeAll": false,
- "label": null,
"multi": false,
"name": "instance",
"options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)",
+ "query": {
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, instance)",
+ "refId": "Prometheus-instance-Variable-Query"
+ },
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
- "tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
@@ -10815,9 +12669,10 @@
"allFormat": "regex wildcard",
"allValue": "",
"current": {},
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"definition": "",
- "error": null,
"hide": 0,
"hideLabel": false,
"includeAll": true,
@@ -10826,14 +12681,16 @@
"multiFormat": "regex values",
"name": "job",
"options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)",
+ "query": {
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, job)",
+ "refId": "Prometheus-job-Variable-Query"
+ },
"refresh": 2,
"refresh_on_load": false,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
- "tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
@@ -10842,9 +12699,10 @@
"allFormat": "regex wildcard",
"allValue": ".*",
"current": {},
- "datasource": "$datasource",
+ "datasource": {
+ "uid": "$datasource"
+ },
"definition": "",
- "error": null,
"hide": 0,
"hideLabel": false,
"includeAll": true,
@@ -10853,14 +12711,16 @@
"multiFormat": "regex values",
"name": "index",
"options": [],
- "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)",
+ "query": {
+ "query": "label_values(synapse_util_metrics_block_ru_utime_seconds_total, index)",
+ "refId": "Prometheus-index-Variable-Query"
+ },
"refresh": 2,
"refresh_on_load": false,
"regex": "",
"skipUrlSync": false,
"sort": 3,
"tagValuesQuery": "",
- "tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
@@ -10900,5 +12760,6 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "version": 100
-}
+ "version": 149,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/contrib/prometheus/synapse-v1.rules b/contrib/prometheus/synapse-v1.rules
deleted file mode 100644
index 4c900ba537..0000000000
--- a/contrib/prometheus/synapse-v1.rules
+++ /dev/null
@@ -1,21 +0,0 @@
-synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
-synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
-
-synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
-synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
-
-synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
-
-synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
-synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
-
-synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
-synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
-synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
-
-synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
-synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
-synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
-
-synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
-synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
diff --git a/contrib/prometheus/synapse-v2.rules b/contrib/prometheus/synapse-v2.rules
index 7e405bf7f0..dde311322f 100644
--- a/contrib/prometheus/synapse-v2.rules
+++ b/contrib/prometheus/synapse-v2.rules
@@ -1,37 +1,20 @@
groups:
- name: synapse
rules:
- - record: "synapse_federation_transaction_queue_pendingEdus:total"
- expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- - record: "synapse_federation_transaction_queue_pendingPdus:total"
- expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- - record: 'synapse_http_server_request_count:method'
- labels:
- servlet: ""
- expr: "sum(synapse_http_server_request_count) by (method)"
- - record: 'synapse_http_server_request_count:servlet'
- labels:
- method: ""
- expr: 'sum(synapse_http_server_request_count) by (servlet)'
-
- - record: 'synapse_http_server_request_count:total'
- labels:
- servlet: ""
- expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
-
- - record: 'synapse_cache:hit_ratio_5m'
- expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- - record: 'synapse_cache:hit_ratio_30s'
- expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
+ ###
+ ### Prometheus Console Only
+ ### The following rules are only needed if you use the Prometheus Console
+ ### in contrib/prometheus/consoles/synapse.html
+ ###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
- expr: 'synapse_federation_client_sent_edus + 0'
+ expr: 'synapse_federation_client_sent_edus_total + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
- expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
+ expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
@@ -40,11 +23,11 @@ groups:
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
- expr: 'synapse_federation_server_received_edus + 0'
+ expr: 'synapse_federation_server_received_edus_total + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
- expr: 'synapse_federation_server_received_pdus + 0'
+ expr: 'synapse_federation_server_received_pdus_total + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
@@ -58,21 +41,34 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
+ ###
+ ### End of 'Prometheus Console Only' rules block
+ ###
+
+ ###
+ ### Grafana Only
+ ### The following rules are only needed if you use the Grafana dashboard
+ ### in contrib/grafana/synapse.json
+ ###
- record: synapse_storage_events_persisted_by_source_type
- expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
+ expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
- expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
+ expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
- expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
+ expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges
+
- record: synapse_storage_events_persisted_by_event_type
- expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- - record: synapse_storage_events_persisted_by_origin
- expr: sum without(type) (synapse_storage_events_persisted_events_sep)
+ expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
+ - record: synapse_storage_events_persisted_by_origin
+ expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
+ ###
+ ### End of 'Grafana Only' rules block
+ ###
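For reference, a minimal sketch of how the recording rules in this file could be loaded by Prometheus, assuming the file is installed at `/etc/prometheus/synapse-v2.rules` (the path is illustrative, not part of this change):

```yaml
# prometheus.yml fragment (sketch): load the Synapse recording rules.
# The path below is an assumption; point it at wherever synapse-v2.rules
# from contrib/prometheus is installed on the Prometheus host.
rule_files:
  - /etc/prometheus/synapse-v2.rules
```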
diff --git a/contrib/workers-bash-scripts/create-multiple-generic-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md
index d303101429..c9be707b3c 100644
--- a/contrib/workers-bash-scripts/create-multiple-generic-workers.md
+++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md
@@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
-cat << EOF >> generic_worker$i.yaml
+cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
@@ -15,6 +15,8 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
+worker_main_http_uri: http://localhost:8008/
+
worker_listeners:
- type: http
port: 808$i
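For clarity, a sketch of the first file the amended loop would now emit (for `i=1`); only the fields visible in this diff are reproduced, and the real template may contain further keys:

```yaml
# generic_worker1.yaml as generated by the script above for i=1 (sketch).
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 8081
```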
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index f1ec609163..dd97e888ba 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
-pip install poetry==1.2.0b1
+pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \
@@ -61,7 +61,7 @@ dh_virtualenv \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"
-PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
+PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
@@ -78,9 +78,14 @@ case "$DEB_BUILD_OPTIONS" in
cp -r tests "$tmpdir"
+ # To avoid pulling in the unbuilt Synapse in the local directory
+ pushd /
+
PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests
+ popd
+
;;
esac
diff --git a/debian/changelog b/debian/changelog
index 9efcb4f132..1f1b4daa31 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,172 @@
+matrix-synapse-py3 (1.72.0) stable; urgency=medium
+
+ * New Synapse release 1.72.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 22 Nov 2022 10:57:30 +0000
+
+matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.72.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 16 Nov 2022 15:10:59 +0000
+
+matrix-synapse-py3 (1.71.0) stable; urgency=medium
+
+ * New Synapse release 1.71.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 08 Nov 2022 10:38:10 +0000
+
+matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 04 Nov 2022 12:00:33 +0000
+
+matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 01 Nov 2022 12:10:17 +0000
+
+matrix-synapse-py3 (1.70.1) stable; urgency=medium
+
+ * New Synapse release 1.70.1.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 28 Oct 2022 12:10:21 +0100
+
+matrix-synapse-py3 (1.70.0) stable; urgency=medium
+
+ * New Synapse release 1.70.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 26 Oct 2022 11:11:50 +0100
+
+matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 25 Oct 2022 10:59:47 +0100
+
+matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 19 Oct 2022 14:11:57 +0100
+
+matrix-synapse-py3 (1.69.0) stable; urgency=medium
+
+ * New Synapse release 1.69.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Mon, 17 Oct 2022 11:31:03 +0100
+
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 14 Oct 2022 15:04:47 +0100
+
+matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 12 Oct 2022 13:24:04 +0100
+
+matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 06 Oct 2022 14:45:00 +0100
+
+matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
+
+ * The man page for the hash_password script has been updated to reflect
+ the correct default value of 'bcrypt_rounds'.
+ * New Synapse release 1.69.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 04 Oct 2022 11:17:16 +0100
+
+matrix-synapse-py3 (1.68.0) stable; urgency=medium
+
+ * New Synapse release 1.68.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 27 Sep 2022 12:02:09 +0100
+
+matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.68.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 23 Sep 2022 09:40:10 +0100
+
+matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.68.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 20 Sep 2022 11:18:20 +0100
+
+matrix-synapse-py3 (1.67.0) stable; urgency=medium
+
+ * New Synapse release 1.67.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 13 Sep 2022 09:19:56 +0100
+
+matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium
+
+ [ Erik Johnston ]
+ * Use stable poetry 1.2.0 version, rather than a prerelease.
+
+ [ Synapse Packaging team ]
+ * New Synapse release 1.67.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 06 Sep 2022 09:01:06 +0100
+
+matrix-synapse-py3 (1.66.0) stable; urgency=medium
+
+ * New Synapse release 1.66.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 31 Aug 2022 11:20:17 +0100
+
+matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium
+
+ [ Jörg Behrmann ]
+ * Update debhelper to compatibility level 12.
+ * Drop the preinst script stopping synapse.
+ * Allocate a group for the system user.
+ * Change dpkg-statoverride to --force-statoverride-add.
+
+ [ Erik Johnston ]
+ * Disable `dh_auto_configure` as it broke during Rust build.
+
+ -- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100
+
+matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.66.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 30 Aug 2022 12:25:19 +0100
+
+matrix-synapse-py3 (1.66.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.66.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 23 Aug 2022 09:48:55 +0100
+
+matrix-synapse-py3 (1.65.0) stable; urgency=medium
+
+ * New Synapse release 1.65.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 16 Aug 2022 16:51:26 +0100
+
+matrix-synapse-py3 (1.65.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.65.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 11 Aug 2022 11:38:18 +0100
+
+matrix-synapse-py3 (1.65.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.65.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 09 Aug 2022 11:39:29 +0100
+
matrix-synapse-py3 (1.64.0) stable; urgency=medium
* New Synapse release 1.64.0.
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28b8a..0000000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
index 412a9e1d4c..86f5a66d02 100644
--- a/debian/control
+++ b/debian/control
@@ -4,7 +4,7 @@ Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
- debhelper (>= 10),
+ debhelper-compat (= 12),
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
diff --git a/debian/hash_password.1 b/debian/hash_password.1
index d64b91e7c8..39fa3ffcbf 100644
--- a/debian/hash_password.1
+++ b/debian/hash_password.1
@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
.P
-It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
+It accepts a YAML file which can be used to specify parameters like the number of rounds for bcrypt and a password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"
diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn
index eeb354602d..5d0df53802 100644
--- a/debian/hash_password.ronn
+++ b/debian/hash_password.ronn
@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.
It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
-value used for the hashing. By default `bcrypt_rounds` is set to **10**.
+value used for the hashing. By default `bcrypt_rounds` is set to **12**.
The hashed password is written on the `STDOUT`.
diff --git a/debian/matrix-synapse-py3.postinst b/debian/matrix-synapse-py3.postinst
index 029b9e0243..acab0877ad 100644
--- a/debian/matrix-synapse-py3.postinst
+++ b/debian/matrix-synapse-py3.postinst
@@ -40,12 +40,12 @@ EOF
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
if ! getent passwd $USER >/dev/null; then
- adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
+ adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER
fi
for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
- dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
+ dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR
fi
done
diff --git a/debian/matrix-synapse-py3.preinst b/debian/matrix-synapse-py3.preinst
deleted file mode 100644
index 4b5612f050..0000000000
--- a/debian/matrix-synapse-py3.preinst
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh -e
-
-# Attempt to undo some of the braindamage caused by
-# https://github.com/matrix-org/package-synapse-debian/issues/18.
-#
-# Due to reasons [1], the old python2 matrix-synapse package will not stop the
-# service when the package is uninstalled. Our maintainer scripts will do the
-# right thing in terms of ensuring the service is enabled and unmasked, but
-# then do a `systemctl start matrix-synapse`, which of course does nothing -
-# leaving the old (py2) service running.
-#
-# There should normally be no reason for the service to be running during our
-# preinst, so we assume that if it *is* running, it's due to that situation,
-# and stop it.
-#
-# [1] dh_systemd_start doesn't do anything because it sees that there is an
-# init.d script with the same name, so leaves it to dh_installinit.
-#
-# dh_installinit doesn't do anything because somebody gave it a --no-start
-# for unknown reasons.
-
-if [ -x /bin/systemctl ]; then
- if /bin/systemctl --quiet is-active -- matrix-synapse; then
- echo >&2 "stopping existing matrix-synapse service"
- /bin/systemctl stop matrix-synapse || true
- fi
-fi
-
-#DEBHELPER#
-
-exit 0
diff --git a/debian/matrix-synapse.default b/debian/matrix-synapse.default
deleted file mode 100644
index f402d73bbf..0000000000
--- a/debian/matrix-synapse.default
+++ /dev/null
@@ -1,2 +0,0 @@
-# Specify environment variables used when running Synapse
-# SYNAPSE_CACHE_FACTOR=0.5 (default)
diff --git a/debian/matrix-synapse.service b/debian/matrix-synapse.service
index bde1c6cb9f..c3f9660283 100644
--- a/debian/matrix-synapse.service
+++ b/debian/matrix-synapse.service
@@ -5,7 +5,6 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
-EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
@@ -13,5 +12,10 @@ Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse
+# The environment file is not shipped by default anymore and the below directive
+# is for backwards compatibility only. Please use your homeserver.yaml if
+# possible.
+EnvironmentFile=-/etc/default/matrix-synapse
+
[Install]
WantedBy=multi-user.target
diff --git a/debian/rules b/debian/rules
index 5baf2475f0..914d068f2a 100755
--- a/debian/rules
+++ b/debian/rules
@@ -6,15 +6,19 @@
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`
-override_dh_systemd_enable:
- dh_systemd_enable --name=matrix-synapse
-
-override_dh_installinit:
- dh_installinit --name=matrix-synapse
+override_dh_installsystemd:
+ dh_installsystemd --name=matrix-synapse
# we don't really want to strip the symbols from our object files.
override_dh_strip:
+override_dh_auto_configure:
+
+# many libraries pulled from PyPI have allocatable sections after
+# non-allocatable ones on which dwz errors out. For those without the issue the
+# gains are only marginal
+override_dh_dwz:
+
# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that
diff --git a/docker/Dockerfile b/docker/Dockerfile
index fa58ae3acb..7f8756e8a4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -31,7 +31,9 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
-FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
+# We hardcode the use of Debian bullseye here because the Debian release behind the bare
+# `-slim` tag could change upstream, and other Dockerfiles used for testing expect bullseye.
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -46,17 +48,8 @@ RUN \
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
-# We use a specific commit from poetry's master branch instead of our usual 1.1.14,
-# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
-# https://github.com/python-poetry/poetry/pull/5156 and
-# https://github.com/python-poetry/poetry/issues/5141 ;
-# without it, we generate a requirements.txt with incorrect environment markers,
-# which causes necessary packages to be omitted when we `pip install`.
-#
-# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
-# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
- pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
+ pip install --user "poetry==1.2.0"
WORKDIR /synapse
@@ -85,7 +78,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
-FROM docker.io/python:${PYTHON_VERSION}-slim as builder
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
@@ -101,11 +94,26 @@ RUN \
libxml++2.6-dev \
libxslt1-dev \
openssl \
- rustc \
zlib1g-dev \
git \
+ curl \
&& rm -rf /var/lib/apt/lists/*
+
+# Install rust and ensure it's in the PATH
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
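As a hedged illustration of the build argument introduced above (the image tag is an assumption):

```bash
# Hedged example: opt into CLI-based cargo git fetches, e.g. for memory-constrained arm64 builds.
DOCKER_BUILDKIT=1 docker build \
    --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true \
    -t matrixdotorg/synapse -f docker/Dockerfile .
```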
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -117,8 +125,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
+COPY rust /synapse/rust/
# ... and what we need to `pip install`.
-COPY pyproject.toml README.rst /synapse/
+COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -126,7 +135,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+RUN --mount=type=cache,target=/synapse/target,sharing=locked \
+ --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
+ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
@@ -136,7 +147,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
### Stage 2: runtime
###
-FROM docker.io/python:${PYTHON_VERSION}-slim
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index fbc1d2346f..73165f6f85 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -72,6 +72,7 @@ RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
+ curl \
debhelper \
devscripts \
libsystemd-dev \
@@ -85,6 +86,15 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1
+# Install rust and ensure it's in the PATH
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
# install dhvirtualenv. Update the apt cache again first, in case we got a
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 84f836ff7b..0c2d4f3047 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,39 +1,66 @@
# syntax=docker/dockerfile:1
-# Inherit from the official Synapse docker image
+
ARG SYNAPSE_VERSION=latest
+
+# first of all, we create a base image with an nginx which we can copy into the
+# target image. For repeated rebuilds, this is much faster than apt installing
+# each time.
+
+FROM debian:bullseye-slim AS deps_base
+ RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update -qq && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
+ redis-server nginx-light
+
+# Similarly, a base to copy the redis server from.
+#
+# The redis docker image has fewer dynamic libraries than the debian package,
+# which makes it much easier to copy (but we need to make sure we use an image
+# based on the same debian version as the synapse image, to make sure we get
+# the expected version of libc).
+FROM redis:6-bullseye AS redis_base
+
+# now build the final image, based on the regular Synapse docker image
FROM matrixdotorg/synapse:$SYNAPSE_VERSION
-# Install deps
-RUN \
- --mount=type=cache,target=/var/cache/apt,sharing=locked \
- --mount=type=cache,target=/var/lib/apt,sharing=locked \
- apt-get update -qq && \
- DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \
- redis-server nginx-light
+ # Install supervisord with pip instead of apt, to avoid installing a second
+ # copy of python.
+ RUN --mount=type=cache,target=/root/.cache/pip \
+ pip install supervisor~=4.2
+ RUN mkdir -p /etc/supervisor/conf.d
+
+ # Copy over redis and nginx
+ COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin
-# Install supervisord with pip instead of apt, to avoid installing a second
-# copy of python.
-RUN --mount=type=cache,target=/root/.cache/pip \
- pip install supervisor~=4.2
+ COPY --from=deps_base /usr/sbin/nginx /usr/sbin
+ COPY --from=deps_base /usr/share/nginx /usr/share/nginx
+ COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx
+ COPY --from=deps_base /etc/nginx /etc/nginx
+ RUN rm /etc/nginx/sites-enabled/default
+ RUN mkdir /var/log/nginx /var/lib/nginx
+ RUN chown www-data /var/lib/nginx
-# Disable the default nginx sites
-RUN rm /etc/nginx/sites-enabled/default
+ # have nginx log to stderr/out
+ RUN ln -sf /dev/stdout /var/log/nginx/access.log
+ RUN ln -sf /dev/stderr /var/log/nginx/error.log
-# Copy Synapse worker, nginx and supervisord configuration template files
-COPY ./docker/conf-workers/* /conf/
+ # Copy Synapse worker, nginx and supervisord configuration template files
+ COPY ./docker/conf-workers/* /conf/
-# Copy a script to prefix log lines with the supervisor program name
-COPY ./docker/prefix-log /usr/local/bin/
+ # Copy a script to prefix log lines with the supervisor program name
+ COPY ./docker/prefix-log /usr/local/bin/
-# Expose nginx listener port
-EXPOSE 8080/tcp
+ # Expose nginx listener port
+ EXPOSE 8080/tcp
-# A script to read environment variables and create the necessary
-# files to run the desired worker configuration. Will start supervisord.
-COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
-ENTRYPOINT ["/configure_workers_and_start.py"]
+ # A script to read environment variables and create the necessary
+ # files to run the desired worker configuration. Will start supervisord.
+ COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
+ ENTRYPOINT ["/configure_workers_and_start.py"]
-# Replace the healthcheck with one which checks *all* the workers. The script
-# is generated by configure_workers_and_start.py.
-HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
- CMD /bin/sh /healthcheck.sh
+ # Replace the healthcheck with one which checks *all* the workers. The script
+ # is generated by configure_workers_and_start.py.
+ HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+ CMD /bin/sh /healthcheck.sh
diff --git a/docker/README.md b/docker/README.md
index 5b7de2fe38..eda3221c23 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -191,7 +191,7 @@ If you need to build the image from a Synapse checkout, use the following `docke
build` command from the repo's root:
```
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+DOCKER_BUILDKIT=1 docker build -t matrixdotorg/synapse -f docker/Dockerfile .
```
You can choose to build a different docker image by changing the value of the `-f` flag to
@@ -241,4 +241,4 @@ healthcheck:
Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
-[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
+[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 3cfff19f9a..c0935c99a8 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -8,35 +8,29 @@
ARG SYNAPSE_VERSION=latest
-# first of all, we create a base image with a postgres server and database,
-# which we can copy into the target image. For repeated rebuilds, this is
-# much faster than apt installing postgres each time.
-#
-# This trick only works because (a) the Synapse image happens to have all the
-# shared libraries that postgres wants, (b) we use a postgres image based on
-# the same debian version as Synapse's docker image (so the versions of the
-# shared libraries match).
+FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+ # First of all, we copy postgres server from the official postgres image,
+ # since for repeated rebuilds, this is much faster than apt installing
+ # postgres each time.
+
+ # This trick only works because (a) the Synapse image happens to have all the
+ # shared libraries that postgres wants, (b) we use a postgres image based on
+ # the same debian version as Synapse's docker image (so the versions of the
+ # shared libraries match).
+ RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
+ COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
+ COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+ RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
+ ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
+ ENV PGDATA=/var/lib/postgresql/data
-FROM postgres:13-bullseye AS postgres_base
- # initialise the database cluster in /var/lib/postgresql
+ # We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
# Configure a password and create a database for Synapse
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
-# now build the final image, based on the Synapse image.
-
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
- # copy the postgres installation over from the image we built above
- RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
- COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
- COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
- COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
- RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
- ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
- ENV PGDATA=/var/lib/postgresql/data
-
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc6482f763..49d79745b0 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -45,7 +45,12 @@ esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
- export SYNAPSE_WORKER_TYPES="\
+ # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+ # utilizing WORKERS=1 for backwards compatibility.
+ # -n True if the length of string is non-zero.
+ # -z True if the length of string is zero.
+ if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+ export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
@@ -57,9 +62,12 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
federation_reader, \
federation_sender, \
synchrotron, \
+ client_reader, \
appservice, \
pusher"
+ fi
+ log "Workers requested: $SYNAPSE_WORKER_TYPES"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 9e554a865e..883a87159c 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -12,6 +12,8 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
+url_preview_enabled: true
+url_preview_ip_range_blacklist: []
## Registration ##
@@ -90,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable spaces support
- spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
@@ -102,6 +102,8 @@ experimental_features:
{% endif %}
# Enable jump to date endpoint
msc3030_enabled: true
+ # Filtering /messages by relation type.
+ msc3874_enabled: true
server_notices:
system_mxid_localpart: _server
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index 086137494e..9f1e03cfc0 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -19,7 +19,7 @@ username=www-data
autorestart=true
[program:redis]
-command=/usr/local/bin/prefix-log /usr/bin/redis-server /etc/redis/redis.conf --daemonize no
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 51583dc13d..c1e1544536 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-# below. Leave empty for no workers, or set to '*' for all possible workers.
+# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
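A hedged sketch of how these environment variables might be supplied when running the worker-enabled image (the volume path, port mapping and server name are placeholders, not taken from the patch):

```bash
# Hedged example: launch the workers image with a comma-separated SYNAPSE_WORKER_TYPES list.
docker run -d --name synapse-workers \
    -v /path/to/synapse/data:/data \
    -p 8080:8080 \
    -e SYNAPSE_SERVER_NAME=example.org \
    -e SYNAPSE_REPORT_STATS=no \
    -e SYNAPSE_WORKER_TYPES="synchrotron, federation_sender, client_reader" \
    matrixdotorg/synapse-workers:latest
```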
@@ -39,6 +39,7 @@
# continue to work if so.
import os
+import platform
import subprocess
import sys
from pathlib import Path
@@ -49,13 +50,18 @@ from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
-
+# Workers with exposed endpoints need either "client", "federation", or "media" listener_resources
+# Watching /_matrix/client needs a "client" listener
+# Watching /_matrix/federation needs a "federation" listener
+# Watching /_matrix/media and related needs a "media" listener
+# Stream Writers require "client" and "replication" listeners because they
+# have to attach by instance_map to the master process and have client endpoints.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
- "app": "synapse.app.pusher",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"start_pushers": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"user_dir": {
@@ -78,7 +84,11 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
- "shared_extra_conf": {"enable_media_repo": False},
+ # The first configured media worker will run the media background jobs
+ "shared_extra_conf": {
+ "enable_media_repo": False,
+ "media_instance_running_background_jobs": "media_repository1",
+ },
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
@@ -89,10 +99,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"federation_sender": {
- "app": "synapse.app.federation_sender",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"send_federation": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"synchrotron": {
@@ -107,6 +117,34 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"shared_extra_conf": {},
"worker_extra_conf": "",
},
+ "client_reader": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
+ "^/_matrix/client/v1/rooms/.*/hierarchy$",
+ "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
+ "^/_matrix/client/v1/rooms/.*/threads$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
+ "^/_matrix/client/versions$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
+ "^/_matrix/client/(r0|v3|unstable)/register$",
+ "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
+ "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
@@ -171,14 +209,54 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"worker_extra_conf": "",
},
"frontend_proxy": {
- "app": "synapse.app.frontend_proxy",
+ "app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
- "worker_extra_conf": (
- "worker_main_http_uri: http://127.0.0.1:%d"
- % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
- ),
+ "worker_extra_conf": "",
+ },
+ "account_data": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+ "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "presence": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "receipts": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "to_device": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "typing": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
},
}
@@ -201,24 +279,19 @@ upstream {upstream_worker_type} {{
# Utility functions
def log(txt: str) -> None:
- """Log something to the stdout.
-
- Args:
- txt: The text to log.
- """
print(txt)
def error(txt: str) -> NoReturn:
- """Log something and exit with an error code.
-
- Args:
- txt: The text to log in error.
- """
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, **template_vars: object) -> None:
"""Generate a file from a template
@@ -247,14 +320,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
outfile.write(rendered)
-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
- append sharded worker information to it for the current worker_type instance.
+ append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -285,9 +358,19 @@ def add_sharding_to_shared_config(
"port": worker_port,
}
- elif worker_type == "media_repository":
- # The first configured media worker will run the media background jobs
- shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+ elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+ # Update the list of stream writers
+ # It's convenient that the name of the worker type is the same as the stream to write
+ shared_config.setdefault("stream_writers", {}).setdefault(
+ worker_type, []
+ ).append(worker_name)
+
+ # Map of stream writer instance names to host/ports combos
+ # For now, all stream writers need http replication ports
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
def generate_base_homeserver_config() -> None:
@@ -299,7 +382,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
# note that this script is copied in in the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
- subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
+ subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def generate_worker_files(
@@ -373,8 +456,8 @@ def generate_worker_files(
# No workers, just the main process
worker_types = []
else:
- # Split type names by comma
- worker_types = worker_types_env.split(",")
+ # Split type names by comma, ignoring whitespace.
+ worker_types = [x.strip() for x in worker_types_env.split(",")]
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -393,14 +476,11 @@ def generate_worker_files(
# For each worker type specified by the user, create config values
for worker_type in worker_types:
- worker_type = worker_type.strip()
-
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
- log(worker_type + " is an unknown worker type! It will be ignored")
- continue
+ error(worker_type + " is an unknown worker type! Please fix!")
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
@@ -419,11 +499,11 @@ def generate_worker_files(
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
- if worker_type_total_count > 1:
- # Update the shared config with sharding-related options if necessary
- add_sharding_to_shared_config(
- shared_config, worker_type, worker_name, worker_port
- )
+
+ # Update the shared config with sharding-related options if necessary
+ add_worker_roles_to_shared_config(
+ shared_config, worker_type, worker_name, worker_port
+ )
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
@@ -604,14 +684,24 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
with open(mark_filepath, "w") as f:
f.write("")
+ # Lifted right out of start.py
+ jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
+
+ if os.path.isfile(jemallocpath):
+ environ["LD_PRELOAD"] = jemallocpath
+ else:
+ log("Could not find %s, will not use" % (jemallocpath,))
+
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
- os.execl(
+ flush_buffers()
+ os.execle(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
+ environ,
)
diff --git a/docker/start.py b/docker/start.py
index 5a98dce551..ebcc599f04 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -13,14 +13,19 @@ import jinja2
# Utility functions
def log(txt: str) -> None:
- print(txt, file=sys.stderr)
+ print(txt)
def error(txt: str) -> NoReturn:
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -131,10 +136,10 @@ def generate_config_from_template(
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
- subprocess.check_output(["chown", "-R", ownership, "/data"])
+ subprocess.run(["chown", "-R", ownership, "/data"], check=True)
args = ["gosu", ownership] + args
- subprocess.check_output(args)
+ subprocess.run(args, check=True)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -158,7 +163,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
- subprocess.check_output(["chown", ownership, data_dir])
+ subprocess.run(["chown", ownership, data_dir], check=True)
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -185,6 +190,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
+ flush_buffers()
os.execv(sys.executable, args)
@@ -267,8 +273,10 @@ running with 'migrate_config'. See the README for more details.
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
+ flush_buffers()
os.execve("/usr/sbin/gosu", args, environ)
else:
+ flush_buffers()
os.execve(sys.executable, args, environ)
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 2d56b084e2..8d68719958 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -9,6 +9,8 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
+ - [coturn TURN server](setup/turn/coturn.md)
+ - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)
# Upgrading
@@ -69,6 +71,7 @@
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
+ - [Monthly Active Users](usage/administration/monthly_active_users.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md
index c346090bb1..dd2830f3a1 100644
--- a/docs/admin_api/register_api.md
+++ b/docs/admin_api/register_api.md
@@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
-(`registration_shared_secret` in the homeserver configuration), and a
-one-time nonce. If the registration shared secret is not configured, this API
-is not enabled.
+([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
+in the homeserver configuration), and a one-time nonce. If the registration
+shared secret is not configured, this API is not enabled.
To fetch the nonce, you need to request one from the API:
@@ -46,7 +46,24 @@ As an example:
The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
-each separated by NULs. For an example of generation in Python:
+each separated by NULs.
+
+Here is an easy way to generate the HMAC digest if you have Bash and OpenSSL:
+
+```bash
+# Update these values and then paste this code block into a bash terminal
+nonce='thisisanonce'
+username='pepper_roni'
+password='pizza'
+admin='admin'
+secret='shared_secret'
+
+printf '%s\0%s\0%s\0%s' "$nonce" "$username" "$password" "$admin" |
+ openssl sha1 -hmac "$secret" |
+ awk '{print $2}'
+```
+
+For an example of generation in Python:
```python
import hmac, hashlib
@@ -70,4 +87,4 @@ def generate_mac(nonce, user, password, admin=False, user_type=None):
mac.update(user_type.encode('utf8'))
return mac.hexdigest()
-```
\ No newline at end of file
+```
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 9aa489e4a3..8f727b363e 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -302,6 +302,8 @@ The following fields are possible in the JSON response body:
* `state_events` - Total number of state_events of a room. Complexity of the room.
* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space.
If the room does not define a type, the value will be `null`.
+* `forgotten` - Whether all local users have
+ [forgotten](https://spec.matrix.org/latest/client-server-api/#leaving-rooms) the room.
The API is:
@@ -330,10 +332,13 @@ A response body like the following is returned:
"guest_access": null,
"history_visibility": "shared",
"state_events": 93534,
- "room_type": "m.space"
+ "room_type": "m.space",
+ "forgotten": false
}
```
+_Changed in Synapse 1.66:_ Added the `forgotten` key to the response body.
+
# Room Members API
The Room Members admin API allows server admins to get a list of all members of a room.
@@ -388,6 +393,151 @@ A response body like the following is returned:
}
```
+# Room Messages API
+
+The Room Messages admin API allows server admins to get all messages
+sent to a room in a given timeframe. There are various parameters available
+that allow for filtering and ordering the returned list. This API supports pagination.
+
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api).
+
+This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
+
+The API is:
+```
+GET /_synapse/admin/v1/rooms/<room_id>/messages
+```
+
+**Parameters**
+
+The following path parameters are required:
+
+* `room_id` - The ID of the room you wish to fetch messages from.
+
+The following query parameters are available:
+
+* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
+ or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
+* `to` - The token to stop returning events at.
+* `limit` - The maximum number of events to return. Defaults to `10`.
+* `filter` - A JSON RoomEventFilter to filter returned events with.
+* `dir` - The direction to return events from. Either `f` for forwards (oldest events first) or `b`
+ for backwards (newest events first). Defaults to `f`.
+
+**Response**
+
+The following fields are possible in the JSON response body:
+
+* `chunk` - A list of room events. The order depends on the dir parameter.
+ Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
+* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
+ If no further events are available, this property is omitted from the response.
+* `start` - A token corresponding to the start of chunk.
+* `state` - A list of state events relevant to showing the chunk.
+
+**Example**
+
+For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).
+
+```json
+{
+ "chunk": [
+ {
+ "content": {
+ "body": "This is an example text message",
+ "format": "org.matrix.custom.html",
+ "formatted_body": "<b>This is an example text message</b>",
+ "msgtype": "m.text"
+ },
+ "event_id": "$143273582443PhrSn:example.org",
+ "origin_server_ts": 1432735824653,
+ "room_id": "!636q39766251:example.com",
+ "sender": "@example:example.org",
+ "type": "m.room.message",
+ "unsigned": {
+ "age": 1234
+ }
+ },
+ {
+ "content": {
+ "name": "The room name"
+ },
+ "event_id": "$143273582443PhrSn:example.org",
+ "origin_server_ts": 1432735824653,
+ "room_id": "!636q39766251:example.com",
+ "sender": "@example:example.org",
+ "state_key": "",
+ "type": "m.room.name",
+ "unsigned": {
+ "age": 1234
+ }
+ },
+ {
+ "content": {
+ "body": "Gangnam Style",
+ "info": {
+ "duration": 2140786,
+ "h": 320,
+ "mimetype": "video/mp4",
+ "size": 1563685,
+ "thumbnail_info": {
+ "h": 300,
+ "mimetype": "image/jpeg",
+ "size": 46144,
+ "w": 300
+ },
+ "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
+ "w": 480
+ },
+ "msgtype": "m.video",
+ "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
+ },
+ "event_id": "$143273582443PhrSn:example.org",
+ "origin_server_ts": 1432735824653,
+ "room_id": "!636q39766251:example.com",
+ "sender": "@example:example.org",
+ "type": "m.room.message",
+ "unsigned": {
+ "age": 1234
+ }
+ }
+ ],
+ "end": "t47409-4357353_219380_26003_2265",
+ "start": "t47429-4392820_219380_26003_2265"
+}
+```
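A hedged example request for this endpoint (the server address and token are placeholders):

```bash
# Hedged example: fetch the first page of messages for the room used in the response above.
curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v1/rooms/!636q39766251:example.com/messages?from=t47429-4392820_219380_26003_2265&limit=10&dir=f'
```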
+
+# Room Timestamp to Event API
+
+The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
+timestamp (`ts` query parameter) in the given direction (`dir` query parameter).
+
+This is useful for cases like jump-to-date, so you can start paginating messages from
+a given date in the archive.
+
+The API is:
+```
+ GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
+```
+
+**Parameters**
+
+The following path parameters are required:
+
+* `room_id` - The ID of the room you wish to check.
+
+The following query parameters are available:
+
+* `ts` - a timestamp in milliseconds where we will find the closest event in
+ the given direction.
+* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
+ given timestamp. Defaults to `f`.
+
+**Response**
+
+* `event_id` - The ID of the event closest to the given timestamp.
+
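A hedged example request (placeholders as above):

```bash
# Hedged example: find the event closest to a timestamp, searching forwards in time.
curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v1/rooms/!636q39766251:example.com/timestamp_to_event?ts=1432735824653&dir=f'
```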
# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 0871cfebf5..880bef4194 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -37,11 +37,13 @@ It returns a JSON body like the following:
"is_guest": 0,
"admin": 0,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
+ "consent_ts": null,
"external_ids": [
{
"auth_provider": "<provider1>",
@@ -166,6 +168,7 @@ A response body like the following is returned:
"admin": 0,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "<User One>",
"avatar_url": null,
@@ -176,6 +179,7 @@ A response body like the following is returned:
"admin": 1,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "<User Two>",
"avatar_url": "<avatar_url>",
@@ -246,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
+ - `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -364,6 +369,7 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
+- Remove user's consent information (consent version and timestamp)
## Reset password
@@ -753,6 +759,7 @@ A response body like the following is returned:
"device_id": "QBUAZIFURK",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
+ "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
},
@@ -760,6 +767,7 @@ A response body like the following is returned:
"device_id": "AUIECTSRND",
"display_name": "ios",
"last_seen_ip": "1.2.3.5",
+ "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775025,
"user_id": "<user_id>"
}
@@ -786,6 +794,8 @@ The following fields are returned in the JSON response body:
Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
+ - `last_seen_user_agent` - The user agent of the device when it was last seen.
+ (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -837,6 +847,7 @@ A response body like the following is returned:
"device_id": "<device_id>",
"display_name": "android",
"last_seen_ip": "1.2.3.4",
+ "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>"
}
@@ -858,6 +869,8 @@ The following fields are returned in the JSON response body:
Absent if no name has been set.
- `last_seen_ip` - The IP address where this device was last seen.
(May be a few minutes out of date, for efficiency reasons).
+ - `last_seen_user_agent` - The user agent of the device when it was last seen.
+ (May be a few minutes out of date, for efficiency reasons).
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
@@ -1146,3 +1159,80 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
+
+### Find a user based on their ID in an auth provider
+
+The API is:
+
+```
+GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
+```
+
+When a user matches the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+ "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method.
+- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attribute for SAML2 providers.
+
+The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
+
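A hedged example of such a lookup with a URL-encoded `external_id` (the provider ID and subject value are placeholders):

```bash
# Hedged example: look up the Matrix user behind an OIDC subject containing URL-unsafe characters.
curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v1/auth_providers/oidc/users/group%2Fuser%40example.org'
```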
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+ "errcode":"M_NOT_FOUND",
+ "error":"User not found"
+}
+```
+
+_Added in Synapse 1.68.0._
+
+
+### Find a user based on their Third Party ID (ThreePID or 3PID)
+
+The API is:
+
+```
+GET /_synapse/admin/v1/threepid/$medium/users/$address
+```
+
+When a user matches the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
+
+```json
+{
+ "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `medium` - Kind of third-party ID, either `email` or `msisdn`.
+- `address` - Value of the third-party ID.
+
+The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters.
+
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+ "errcode":"M_NOT_FOUND",
+ "error":"User not found"
+}
+```
+
+_Added in Synapse 1.72.0._
diff --git a/docs/auth_chain_difference_algorithm.md b/docs/auth_chain_difference_algorithm.md
index 30f72a70da..ebc9de25b8 100644
--- a/docs/auth_chain_difference_algorithm.md
+++ b/docs/auth_chain_difference_algorithm.md
@@ -34,13 +34,45 @@ the process of indexing it).
## Chain Cover Index
Synapse computes auth chain differences by pre-computing a "chain cover" index
-for the auth chain in a room, allowing efficient reachability queries like "is
-event A in the auth chain of event B". This is done by assigning every event a
-*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
-between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
-is in the auth chain of `B`) if and only if either:
-
-1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
+for the auth chain in a room, allowing us to efficiently make reachability queries
+like "is event `A` in the auth chain of event `B`?". We could do this with an index
+that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this
+would be prohibitively large, scaling poorly as the room accumulates more state
+events.
+
+Instead, we break down the graph into *chains*. A chain is a subset of a DAG
+with the following property: for any pair of events `E` and `F` in the chain,
+the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be
+linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain
+is given a *sequence number* local to that chain. The oldest event `E` in the
+chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has
+sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has
+sequence number 3; and so on.
+
+Synapse ensures that each persisted event belongs to exactly one chain, and
+tracks how the chains are connected to one another. This allows us to
+efficiently answer reachability queries. Doing so uses less storage than
+tracking reachability on an event-by-event basis, particularly when we have
+fewer and longer chains. See
+
+> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944).
+> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598.
+
+for the original idea or
+
+> Y. Chen, Y. Chen, [An efficient algorithm for answering graph
+> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498),
+> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008,
+> pp. 893–902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).)
+
+for a more modern take.
+
+In practical terms, the chain cover assigns every event a
+*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links*
+between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B`
+(i.e. `A` is in the auth chain of `B`) if and only if either:
+
+1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s
sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
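+
+In code, that check is roughly the following (a simplified sketch for
+illustration only, not Synapse's actual implementation; `chain_info` and
+`links` are made-up names for the two structures described above):
+
+```python
+def is_in_auth_chain(a, b, chain_info, links):
+    """Return True if event `a` is in the auth chain of event `b`.
+
+    `chain_info` maps an event ID to its `(chain_id, seq_no)` pair, and `links`
+    maps `(chain_x, chain_y)` to a list of `(x_seq, y_seq)` link endpoints.
+    """
+    a_chain, a_seq = chain_info[a]
+    b_chain, b_seq = chain_info[b]
+
+    # Rule 1: same chain, and `a` appears earlier in it than `b`.
+    if a_chain == b_chain:
+        return a_seq < b_seq
+
+    # Rule 2: a link from `b`'s chain to `a`'s chain whose endpoints bracket
+    # both sequence numbers.
+    for start_seq, end_seq in links.get((b_chain, a_chain), []):
+        if start_seq <= b_seq and a_seq <= end_seq:
+            return True
+    return False
+```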
@@ -49,8 +81,9 @@ There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
-would not be stored. Synapse uses the former implementations so that it doesn't
-need to recurse to test reachability between chains.
+would not be stored. Synapse uses the former implementation so that it doesn't
+need to recurse to test reachability between chains. This trades off extra storage
+in order to save CPU cycles and DB queries.
### Example
diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md
index 359dac07c3..46c18d7d32 100644
--- a/docs/deprecation_policy.md
+++ b/docs/deprecation_policy.md
@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================
-Synapse has a number of platform dependencies, including Python and PostgreSQL.
-This document outlines the policy towards which versions we support, and when we
-drop support for versions in the future.
+Synapse has a number of platform dependencies, including Python, Rust,
+PostgreSQL and SQLite. This document outlines the policy towards which versions
+we support, and when we drop support for versions in the future.
Policy
@@ -17,6 +17,14 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).
+A Rust compiler is required to build Synapse from source. For any given release
+the minimum required version may be bumped up to a recent Rust version, and so
+people building from source should ensure they can fetch recent versions of Rust
+(e.g. by using [rustup](https://rustup.rs/)).
+
+The oldest supported version of SQLite is the version
+[provided](https://packages.debian.org/buster/libsqlite3-0) by
+[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context
-------
@@ -31,3 +39,15 @@ long process.
By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.
+
+For Rust, the situation is a bit different given that a) the Rust foundation
+does not generally support older Rust versions, and b) the library ecosystem
+generally bumps its minimum supported Rust version frequently. In general, the
+Synapse team will try to avoid updating the dependency on Rust to the absolute
+latest version, but introducing a formal policy is hard given the constraints of
+the ecosystem.
+
+On a similar note, SQLite does not generally have a concept of "supported
+release"; bugfixes are published for the latest minor release only. We chose to
+track Debian's oldstable as this is relatively conservative, predictably updated
+and is consistent with the `.deb` packages released by Matrix.org.
\ No newline at end of file
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index ab320cbd78..342bc1d340 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -28,6 +28,9 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
+A recent version of the Rust compiler is needed to build the native modules. The
+easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
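+
+At the time of writing, rustup's own instructions boil down to running the
+following (check the rustup website for the current recommended command):
+
+```sh
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```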
+
# 3. Get the source.
@@ -62,6 +65,8 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
+Synapse requires Poetry version 1.2.0 or later.
+
Next, open a terminal and install dependencies as follows:
```sh
@@ -112,6 +117,11 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.
+When changes are made to any Rust code, you must call either `poetry install`
+or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
+is quicker than `poetry install`, so is recommended when making frequent
+changes to the Rust code.
+
# 8. Test, test, test!
<a name="test-test-test"></a>
@@ -157,6 +167,12 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```
+You can run unit tests in parallel by specifying the `-jX` argument to `trial`, where `X` is the number of parallel runners you want. For example, to use 4 CPU cores, run the tests like this:
+
+```sh
+poetry run trial -j4 tests
+```
+
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
@@ -193,7 +209,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```
-Note that the database file is cleared at the beginning of each test run. Thus it
+Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
@@ -308,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+ - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
+ types you wish to test. This is a simple comma-delimited string containing the
+ worker types defined in the `WORKERS_CONFIG` template
+ [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+ A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"` (see the example invocation below).
+ See the [worker documentation](../workers.md) for additional information on workers.
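+
+For example, a Complement run against a worker-based deployment using just those
+three worker types might be invoked like this (a sketch; it assumes the same
+`COMPLEMENT_DIR` layout used by the other examples in this section):
+
+```sh
+WORKERS=1 WORKER_TYPES="federation_inbound, federation_sender, synchrotron" \
+  COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
+```
+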
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -317,7 +339,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
-install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
+install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
You can then use this incantation to format the tests appropriately:
@@ -374,7 +396,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
-formatting, and should end with a full stop (.) or an exclamation mark (!) for
+formatting, and must end with a full stop (.) or an exclamation mark (!) for
consistency.
Adding credits to the changelog is encouraged, we value your
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index d996a7caa2..29945c264e 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -191,3 +191,28 @@ There are three separate aspects to this:
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.
+
+
+## `event_id` global uniqueness
+
+`event_id`s can be considered globally unique, although there has been a lot of
+debate on this topic in places like
+[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
+[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848), which
+have no resolution yet (as of 2022-09-01). There are several places in Synapse
+and even in the Matrix APIs like [`GET
+/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
+where we assume that event IDs are globally unique.
+
+When scoping `event_id` in a database schema, it is often nice to accompany it
+with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
+REFERENCES rooms(room_id)`), which makes flexible lookups easy. For example, it
+makes it very easy to find and clean up everything in a room when it needs to be
+purged (no need for a sub-`select` query or a join from the `events` table).
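+
+As a purely illustrative example of that pattern (hypothetical table and column
+names):
+
+```sql
+CREATE TABLE room_event_example (
+    room_id TEXT NOT NULL,
+    event_id TEXT NOT NULL,
+    some_data TEXT,
+    -- Scope the event by room and keep a link back to the rooms table.
+    PRIMARY KEY (room_id, event_id),
+    FOREIGN KEY (room_id) REFERENCES rooms(room_id)
+);
+```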
+
+A note on collisions: In room versions `1` and `2` it's possible to end up with
+two events with the same `event_id` (in the same or different rooms). After room
+version `3`, that can only happen with a hash collision, which we basically hope
+will never happen (SHA256 has a massive key space).
+
+
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index 236856a6b0..8474525480 100644
--- a/docs/development/dependencies.md
+++ b/docs/development/dependencies.md
@@ -126,6 +126,23 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```
+## ...delete everything and start over from scratch?
+
+```shell
+# Stop the current virtualenv if active
+$ deactivate
+
+# Remove all of the files from the current environment.
+# Don't worry, even though it says "all", this will only
+# remove the Poetry virtualenvs for the current project.
+$ poetry env remove --all
+
+# Reactivate Poetry shell to create the virtualenv again
+$ poetry shell
+# Install everything again
+$ poetry install --extras all
+```
+
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
@@ -243,14 +260,11 @@ doesn't require poetry. (It's what we use in CI too). However, you could try
## Check the version of poetry with `poetry --version`.
-At the time of writing, the 1.2 series is beta only. We have seen some examples
-where the lockfiles generated by 1.2 prereleasese aren't interpreted correctly
-by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical
-[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain
-[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775).
+The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
-installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.
+installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
+poetry-core`.
## Clear caches: `poetry cache clear --all pypi`.
@@ -259,6 +273,16 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
+## Remove outdated egg-info
+
+Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
+install.
+
+This stores some cached information about dependencies and often conflicts with
+letting Poetry do the right thing.
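+
+For example, from the root of your Synapse checkout:
+
+```sh
+rm -rf matrix_synapse.egg-info/
+```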
+
+
+
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index 8c88f93935..7f3e5359f1 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -8,7 +8,8 @@ and allow server and room admins to configure how long messages should
be kept in a homeserver's database before being purged from it.
**Please note that, as this feature isn't part of the Matrix
specification yet, this implementation is to be considered as
-experimental.**
+experimental. There are known bugs which may cause database corruption.
+Proceed with caution.**
A message retention policy is mainly defined by its `max_lifetime`
parameter, which defines how long a message can be kept around after
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index 4a77d5604c..16e4368f35 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -7,17 +7,30 @@
1. Enable Synapse metrics:
- There are two methods of enabling metrics in Synapse.
+ In `homeserver.yaml`, make sure `enable_metrics` is
+ set to `True`.
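+
+ For example:
+
+ ```yaml
+ enable_metrics: true
+ ```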
+
+1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
+ collect data:
+
+ There are two methods of enabling the metrics endpoint in Synapse.
The first serves the metrics as a part of the usual web server and
- can be enabled by adding the \"metrics\" resource to the existing
- listener as such:
+ can be enabled by adding the `metrics` resource to the existing
+ listener, as in this example:
```yaml
- resources:
- - names:
- - client
- - metrics
+ listeners:
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
+
+ resources:
+ # added "metrics" in this line
+ - names: [client, federation, metrics]
+ compress: false
```
This provides a simple way of adding metrics to your Synapse
@@ -31,19 +44,26 @@
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.
- Add a new listener to homeserver.yaml:
+ Add a new listener to homeserver.yaml as in this example:
```yaml
- listeners:
- - type: metrics
- port: 9000
- bind_addresses:
- - '0.0.0.0'
+ listeners:
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
+
+ resources:
+ - names: [client, federation]
+ compress: false
+
+ # beginning of the new metrics listener
+ - port: 9000
+ type: metrics
+ bind_addresses: ['::1', '127.0.0.1']
```
- For both options, you will need to ensure that `enable_metrics` is
- set to `True`.
-
1. Restart Synapse.
1. Add a Prometheus target for Synapse.
@@ -132,6 +152,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
+**The old names will be disabled by default in Synapse v1.71.0 and removed
+altogether in Synapse v1.73.0.**
| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -143,6 +165,13 @@ Synapse.
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
+| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
+| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
+| synapse_util_caches_cache | synapse_util_caches_cache:total |
+| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
+| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
+| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -180,6 +209,9 @@ Synapse.
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
@@ -258,7 +290,7 @@ Standard Metric Names
As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
-the units have been changed to seconds, from miliseconds.
+the units have been changed to seconds, from milliseconds.
| New name | Old name |
| ---------------------------------------- | --------------------------------- |
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index ec810fd292..f6349d5404 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -263,7 +263,7 @@ class MyAuthProvider:
return None
if self.credentials.get(username) == login_dict.get("my_field"):
- return self.api.get_qualified_user_id(username)
+ return (self.api.get_qualified_user_id(username), None)
async def check_pass(
self,
@@ -280,5 +280,5 @@ class MyAuthProvider:
return None
if self.credentials.get(username) == login_dict.get("password"):
- return self.api.get_qualified_user_id(username)
+ return (self.api.get_qualified_user_id(username), None)
```
diff --git a/docs/openid.md b/docs/openid.md
index d0ccf36f71..37c5eb244d 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
+## OIDC Back-Channel Logout
+
+Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
+
+This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user session.
+This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
+
## Sample configs
Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
+Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse so that Synapse users get logged out when they log out from Keycloak.
+This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
+
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,14 +179,18 @@ oidc_providers:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
+ backchannel_logout_enabled: true # Optional
```
+
### Auth0
[Auth0][auth0] is a hosted SaaS IdP solution.
1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
-3. Add a rule to add the `preferred_username` claim.
+3. Add a rule with any name to add the `preferred_username` claim.
+(See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.)
+
<details>
<summary>Code sample</summary>
@@ -334,11 +350,12 @@ oidc_providers:
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
- scopes: ["openid", "profile"]
+ scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
+ email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
@@ -421,7 +438,7 @@ Synapse config:
user_mapping_provider:
config:
display_name_template: "{{ user.name }}"
- email_template: "{{ '{{ user.email }}' }}"
+ email_template: "{{ user.email }}"
```
Relevant documents:
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index d1618e8155..48dbc1c58e 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -45,6 +45,10 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)
+Optionally, you can also set
+[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
+so that the server extracts and re-uses the same request ID format that the
+reverse proxy is using.
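+
+For example, if your reverse proxy forwards its request ID in an `X-Request-Id`
+header (the exact header name depends on your proxy), the relevant listener
+might be configured along these lines (a sketch; see the listener documentation
+linked above for the full set of options):
+
+```yaml
+listeners:
+  - port: 8008
+    type: http
+    x_forwarded: true
+    # extract the proxy's request ID from this header (name depends on your proxy)
+    request_id_header: "X-Request-Id"
+    resources:
+      - names: [client, federation]
+```
+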
## Reverse-proxy configuration examples
@@ -75,6 +79,9 @@ server {
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
+
+ # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+ proxy_http_version 1.1;
}
}
```
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 3065a0e2d9..6339160d00 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 260e50577b..dcd8f17c5e 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -181,7 +181,7 @@ doas pkg_add synapse
#### NixOS
Robin Lambertz has packaged Synapse for NixOS at:
-<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
+<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/synapse.nix>
### Installing as a Python module from PyPI
@@ -196,6 +196,10 @@ System requirements:
- Python 3.7 or later, up to Python 3.10.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
+If building on an uncommon architecture for which pre-built wheels are
+unavailable, you will need to have a recent Rust compiler installed. The easiest
+way of installing the latest version is to use [rustup](https://rustup.rs/).
+
To install the Synapse homeserver run:
```sh
@@ -299,9 +303,10 @@ You may need to install the latest Xcode developer tools:
xcode-select --install
```
-On ARM-based Macs you may need to explicitly install libjpeg which is a pillow dependency. You can use Homebrew (https://brew.sh):
+On ARM-based Macs you may need to install libjpeg and libpq.
+You can use Homebrew (https://brew.sh):
```sh
- brew install jpeg
+ brew install jpeg libpq
```
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
@@ -506,9 +511,13 @@ email will be disabled.
### Registering a user
-The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
+One way to create a new user is to do so from a client like
+[Element](https://element.io/). This requires registration to be enabled via
+the
+[`enable_registration`](../usage/configuration/config_documentation.md#enable_registration)
+setting.
-Alternatively, you can do so from the command line. This can be done as follows:
+Alternatively, you can create new users from the command line. This can be done as follows:
1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
installed via a prebuilt package, `register_new_matrix_user` should already be
@@ -520,7 +529,7 @@ Alternatively, you can do so from the command line. This can be done as follows:
```
2. Run the following command:
```sh
- register_new_matrix_user -c homeserver.yaml http://localhost:8008
+ register_new_matrix_user -c homeserver.yaml
```
This will prompt you to add details for the new user, and will then connect to
@@ -533,12 +542,13 @@ Make admin [no]:
Success!
```
-This process uses a setting `registration_shared_secret` in
-`homeserver.yaml`, which is shared between Synapse itself and the
-`register_new_matrix_user` script. It doesn't matter what it is (a random
-value is generated by `--generate-config`), but it should be kept secret, as
-anyone with knowledge of it can register users, including admin accounts,
-on your server even if `enable_registration` is `false`.
+This process uses a setting
+[`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret),
+which is shared between Synapse itself and the `register_new_matrix_user`
+script. It doesn't matter what it is (a random value is generated by
+`--generate-config`), but it should be kept secret, as anyone with knowledge of
+it can register users, including admin accounts, on your server even if
+`enable_registration` is `false`.
### Setting up a TURN server
diff --git a/docs/setup/turn/coturn.md b/docs/setup/turn/coturn.md
new file mode 100644
index 0000000000..a1bb1e934c
--- /dev/null
+++ b/docs/setup/turn/coturn.md
@@ -0,0 +1,188 @@
+# coturn TURN server
+
+The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API).
+
+## `coturn` setup
+
+### Initial installation
+
+The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
+
+#### Debian and Ubuntu based distributions
+
+Just install the Debian package:
+
+```sh
+sudo apt install coturn
+```
+
+This will install and start a systemd service called `coturn`.
+
+#### Source installation
+
+1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
+
+1. Configure it:
+
+ ```sh
+ ./configure
+ ```
+
+ You may need to install `libevent2`: if so, you should do so in
+ the way recommended by your operating system. You can ignore
+ warnings about lack of database support: a database is unnecessary
+ for this purpose.
+
+1. Build and install it:
+
+ ```sh
+ make
+ sudo make install
+ ```
+
+### Configuration
+
+1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
+ lines, with example values, are:
+
+ ```
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+ ```
+
+ See `turnserver.conf` for explanations of the options. One way to generate
+ the `static-auth-secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+ A `realm` must be specified, but its value is somewhat arbitrary. (It is
+ sent to clients as part of the authentication flow.) It is conventional to
+ set it to be your server name.
+
+1. You will most likely want to configure `coturn` to write logs somewhere. The
+ easiest way is normally to send them to the syslog:
+
+ ```sh
+ syslog
+ ```
+
+ (in which case, the logs will be available via `journalctl -u coturn` on a
+ systemd system). Alternatively, `coturn` can be configured to write to a
+ logfile - check the example config file supplied with `coturn`.
+
+1. Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point:
+
+ ```
+ # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+ no-tcp-relay
+
+ # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+ # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+ denied-peer-ip=10.0.0.0-10.255.255.255
+ denied-peer-ip=192.168.0.0-192.168.255.255
+ denied-peer-ip=172.16.0.0-172.31.255.255
+
+ # recommended additional local peers to block, to mitigate external access to internal services.
+ # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+ no-multicast-peers
+ denied-peer-ip=0.0.0.0-0.255.255.255
+ denied-peer-ip=100.64.0.0-100.127.255.255
+ denied-peer-ip=127.0.0.0-127.255.255.255
+ denied-peer-ip=169.254.0.0-169.254.255.255
+ denied-peer-ip=192.0.0.0-192.0.0.255
+ denied-peer-ip=192.0.2.0-192.0.2.255
+ denied-peer-ip=192.88.99.0-192.88.99.255
+ denied-peer-ip=198.18.0.0-198.19.255.255
+ denied-peer-ip=198.51.100.0-198.51.100.255
+ denied-peer-ip=203.0.113.0-203.0.113.255
+ denied-peer-ip=240.0.0.0-255.255.255.255
+
+ # special case the turn server itself so that client->TURN->TURN->client flows work
+ # this should be one of the turn server's listening IPs
+ allowed-peer-ip=10.0.0.1
+
+ # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+ user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+ total-quota=1200
+ ```
+
+1. Also consider supporting TLS/DTLS. To do this, add the following settings
+ to `turnserver.conf`:
+
+ ```
+ # TLS certificates, including intermediate certs.
+ # For Let's Encrypt certificates, use `fullchain.pem` here.
+ cert=/path/to/fullchain.pem
+
+ # TLS private key file
+ pkey=/path/to/privkey.pem
+
+ # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
+ #no-tls
+ #no-dtls
+ ```
+
+ In this case, replace the `turn:` schemes in the `turn_uris` settings below
+ with `turns:`.
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (By default: 3478 and 5349 for TURN
+ traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
+ for the UDP relay.)
+
+1. If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. You must configure `coturn` to advertise that
+ address to connecting clients:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ ```
+
+ You may optionally limit the TURN server to listen only on the local
+ address that is mapped by NAT to the external address:
+
+ ```
+ listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `coturn` to advertise each available address:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ external-ip=EXTERNAL_NAT_IPv6_ADDRESS
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. (Re)start the turn server:
+
+ * If you used the Debian package (or have set up a systemd unit yourself):
+ ```sh
+ sudo systemctl restart coturn
+ ```
+
+ * If you built from source:
+
+ ```sh
+ /usr/local/bin/turnserver -o
+ ```
diff --git a/docs/setup/turn/eturnal.md b/docs/setup/turn/eturnal.md
new file mode 100644
index 0000000000..2e5a45673e
--- /dev/null
+++ b/docs/setup/turn/eturnal.md
@@ -0,0 +1,170 @@
+# eturnal TURN server
+
+The following sections describe how to install [eturnal](<https://github.com/processone/eturnal>)
+(which implements the TURN REST API).
+
+## `eturnal` setup
+
+### Initial installation
+
+The `eturnal` TURN server implementation is available from a variety of sources
+such as native package managers, binary packages, installation from source or
+[container image](https://eturnal.net/documentation/code/docker.html). They are
+all described [here](https://github.com/processone/eturnal#installation).
+
+Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
+or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
+are available as well.
+
+### Configuration
+
+After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
+here: `/etc/eturnal.yml` (and, if not found there, there is a backup file here:
+`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
+format. The file contains further explanations.
+
+Here are some hints on how to configure `eturnal` on your [host machine](https://github.com/processone/eturnal#configuration)
+or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
+You may also want to dive deeper into the [reference documentation](https://eturnal.net/documentation/).
+
+`eturnal` runs out of the box with the default configuration. To enable TURN and
+to integrate it with your homeserver, some aspects of `eturnal`'s default configuration file
+must be edited:
+
+1. Homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
+ and eturnal's shared `secret` for authentication
+
+ Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
+ configuration file:
+
+ ```yaml
+ secret: "long-and-cryptic" # Shared secret, CHANGE THIS.
+ ```
+
+ One way to generate a `secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+1. Public IP address
+
+ If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
+ however, it may also be configured by uncommenting and adjusting this line, so that
+ `eturnal` advertises that address to connecting clients:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `eturnal` to advertise each available address:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. Logging
+
+ If `eturnal` was started by systemd, log files are written into the
+ `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
+ instead, the `log_dir` option can be set to `stdout` in the configuration file.
+
+1. Security considerations
+
+ Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point, [see also the official documentation](https://eturnal.net/documentation/#blacklist):
+
+ ```yaml
+ ## Reject TURN relaying from/to the following addresses/networks:
+ blacklist: # This is the default blacklist.
+ - "127.0.0.0/8" # IPv4 loopback.
+ - "::1" # IPv6 loopback.
+ - recommended # Expands to a number of networks recommended to be
+ # blocked, but includes private networks. Those
+ # would have to be 'whitelist'ed if eturnal serves
+ # local clients/peers within such networks.
+ ```
+
+ To whitelist IP addresses or specific (private) networks, you need to **add** a
+ `whitelist` section to the configuration file, e.g.:
+
+ ```yaml
+ whitelist:
+ - "192.168.0.0/16"
+ - "203.0.113.113"
+ - "2001:db8::/64"
+ ```
+
+ The more specific, the better.
+
+1. TURNS (TURN via TLS/DTLS)
+
+ Also consider supporting TLS/DTLS. To do this, adjust the following settings
+ in the `eturnal.yml` configuration file (the TLS parts should no longer be commented out):
+
+ ```yaml
+ listen:
+ - ip: "::"
+ port: 3478
+ transport: udp
+ - ip: "::"
+ port: 3478
+ transport: tcp
+ - ip: "::"
+ port: 5349
+ transport: tls
+
+ ## TLS certificate/key files (must be readable by 'eturnal' user!):
+ tls_crt_file: /etc/eturnal/tls/crt.pem
+ tls_key_file: /etc/eturnal/tls/key.pem
+ ```
+
+ In this case, replace the `turn:` schemes in the homeserver's `turn_uris` settings
+ with `turns:`. More details are described [here](../../usage/configuration/config_documentation.md#turn_uris).
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Firewall
+
+ Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (By default: 3478 and 5349 for TURN
+ traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
+ for the UDP relay.)
+
+1. Reloading/restarting `eturnal`
+
+ Changes in the configuration file require `eturnal` to reload or restart. This
+ can be achieved by:
+
+ ```sh
+ eturnalctl reload
+ ```
+
+ `eturnal` performs a configuration check before actually reloading/restarting
+ and provides hints if something is not correctly configured.
+
+### eturnalctl operations script
+
+`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
+which can be called e.g. to check whether the service is up, to restart the service,
+to query how many active sessions exist, to change logging behaviour, and so on.
+
+Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it from the default `eturnal` directory directly, e.g. `/opt/eturnal/bin/eturnalctl info`.
diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index 7b4ddc5b74..9f5e5fbbe1 100644
--- a/docs/sso_mapping_providers.md
+++ b/docs/sso_mapping_providers.md
@@ -22,7 +22,7 @@ choose their own username.
In the first case - where users are automatically allocated a Matrix ID - it is
the responsibility of the mapping provider to normalise the SSO attributes and
map them to a valid Matrix ID. The [specification for Matrix
-IDs](https://matrix.org/docs/spec/appendices#user-identifiers) has some
+IDs](https://spec.matrix.org/latest/appendices/#user-identifiers) has some
information about what is considered valid.
If the mapping provider does not assign a Matrix ID, then Synapse will
@@ -37,9 +37,10 @@ as Synapse). The Synapse config is then modified to point to the mapping provide
## OpenID Mapping Providers
The OpenID mapping provider can be customized by editing the
-`oidc_config.user_mapping_provider.module` config option.
+[`oidc_providers.user_mapping_provider.module`](usage/configuration/config_documentation.md#oidc_providers)
+config option.
-`oidc_config.user_mapping_provider.config` allows you to provide custom
+`oidc_providers.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
what options it provides (if any). The options listed by default are for the
user mapping provider built in to Synapse. If using a custom module, you should
@@ -58,7 +59,7 @@ A custom mapping provider must specify the following methods:
- This method should have the `@staticmethod` decoration.
- Arguments:
- `config` - A `dict` representing the parsed content of the
- `oidc_config.user_mapping_provider.config` homeserver config option.
+ `oidc_providers.user_mapping_provider.config` homeserver config option.
Runs on homeserver startup. Providers should extract and validate
any option values they need here.
- Whatever is returned will be passed back to the user mapping provider module's
@@ -72,8 +73,8 @@ A custom mapping provider must specify the following methods:
* `async def map_user_attributes(self, userinfo, token, failures)`
- This method must be async.
- Arguments:
- - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
- information from.
+ - `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo)
+ object to extract user information from.
- `token` - A dictionary which includes information necessary to make
further requests to the OpenID provider.
- `failures` - An `int` that represents the amount of times the returned
@@ -90,7 +91,13 @@ A custom mapping provider must specify the following methods:
`None`, the user is prompted to pick their own username. This is only used
during a user's first login. Once a localpart has been associated with a
remote user ID (see `get_remote_user_id`) it cannot be updated.
- - `displayname`: An optional string, the display name for the user.
+ - `confirm_localpart`: A boolean. If set to `True`, when a `localpart`
+ string is returned from this method, Synapse will prompt the user to
+ either accept this localpart or pick their own username. Otherwise this
+ option has no effect. If omitted, defaults to `False`.
+ - `display_name`: An optional string, the display name for the user.
+ - `emails`: A list of strings, the email address(es) to associate with
+ this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
- This method must be async.
- Arguments:
@@ -102,7 +109,7 @@ A custom mapping provider must specify the following methods:
will be returned as part of the response during a successful login.
Note that care should be taken to not overwrite any of the parameters
- usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login).
+ usually returned as part of the [login response](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login).
### Default OpenID Mapping Provider
@@ -113,7 +120,8 @@ specified in the config. It is located at
## SAML Mapping Providers
The SAML mapping provider can be customized by editing the
-`saml2_config.user_mapping_provider.module` config option.
+[`saml2_config.user_mapping_provider.module`](docs/usage/configuration/config_documentation.md#saml2_config)
+config option.
`saml2_config.user_mapping_provider.config` allows you to provide custom
configuration options to the module. Check with the module's documentation for
diff --git a/docs/systemd-with-workers/workers/federation_sender.yaml b/docs/systemd-with-workers/workers/federation_sender.yaml
new file mode 100644
index 0000000000..5c591aec2c
--- /dev/null
+++ b/docs/systemd-with-workers/workers/federation_sender.yaml
@@ -0,0 +1,8 @@
+worker_app: synapse.app.federation_sender
+worker_name: federation_sender1
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml
index a82f9c161f..6e7b60886e 100644
--- a/docs/systemd-with-workers/workers/generic_worker.yaml
+++ b/docs/systemd-with-workers/workers/generic_worker.yaml
@@ -5,6 +5,8 @@ worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
+worker_main_http_uri: http://localhost:8008/
+
worker_listeners:
- type: http
port: 8083
diff --git a/docs/systemd-with-workers/workers/media_worker.yaml b/docs/systemd-with-workers/workers/media_worker.yaml
new file mode 100644
index 0000000000..eb34d12492
--- /dev/null
+++ b/docs/systemd-with-workers/workers/media_worker.yaml
@@ -0,0 +1,14 @@
+worker_app: synapse.app.media_repository
+worker_name: media_worker
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_listeners:
+ - type: http
+ port: 8085
+ resources:
+ - names: [media]
+
+worker_log_config: /etc/matrix-synapse/media-worker-log.yaml
diff --git a/docs/systemd-with-workers/workers/pusher_worker.yaml b/docs/systemd-with-workers/workers/pusher_worker.yaml
new file mode 100644
index 0000000000..46e22c6f06
--- /dev/null
+++ b/docs/systemd-with-workers/workers/pusher_worker.yaml
@@ -0,0 +1,8 @@
+worker_app: synapse.app.pusher
+worker_name: pusher_worker1
+
+# The replication listener on the main synapse process.
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
diff --git a/docs/templates.md b/docs/templates.md
index f87692a453..453ac90dd8 100644
--- a/docs/templates.md
+++ b/docs/templates.md
@@ -9,7 +9,7 @@ in, allowing them to specify custom templates:
```yaml
templates:
- custom_templates_directory: /path/to/custom/templates/
+ custom_template_directory: /path/to/custom/templates/
```
If this setting is not set, or the files named below are not found within the directory,
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 37a311ad9c..b466cab40c 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.
-The following sections describe how to install [coturn](<https://github.com/coturn/coturn>) (which implements the TURN REST API) and integrate it with synapse.
+This documentation provides two TURN server configuration examples:
+
+* [coturn](setup/turn/coturn.md)
+* [eturnal](setup/turn/eturnal.md)
## Requirements
-For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
+For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
Hosting TURN behind NAT requires port forwaring and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.
-## `coturn` setup
-
-### Initial installation
-
-The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
-
-#### Debian installation
-
-Just install the debian package:
-
-```sh
-apt install coturn
-```
-
-This will install and start a systemd service called `coturn`.
-
-#### Source installation
-
-1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
-
-1. Configure it:
-
- ```sh
- ./configure
- ```
-
- You may need to install `libevent2`: if so, you should do so in
- the way recommended by your operating system. You can ignore
- warnings about lack of database support: a database is unnecessary
- for this purpose.
-
-1. Build and install it:
-
- ```sh
- make
- make install
- ```
-
-### Configuration
-
-1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
- lines, with example values, are:
-
- ```
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
- ```
-
- See `turnserver.conf` for explanations of the options. One way to generate
- the `static-auth-secret` is with `pwgen`:
-
- ```sh
- pwgen -s 64 1
- ```
-
- A `realm` must be specified, but its value is somewhat arbitrary. (It is
- sent to clients as part of the authentication flow.) It is conventional to
- set it to be your server name.
-
-1. You will most likely want to configure coturn to write logs somewhere. The
- easiest way is normally to send them to the syslog:
-
- ```sh
- syslog
- ```
-
- (in which case, the logs will be available via `journalctl -u coturn` on a
- systemd system). Alternatively, coturn can be configured to write to a
- logfile - check the example config file supplied with coturn.
-
-1. Consider your security settings. TURN lets users request a relay which will
- connect to arbitrary IP addresses and ports. The following configuration is
- suggested as a minimum starting point:
-
- ```
- # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
- no-tcp-relay
-
- # don't let the relay ever try to connect to private IP address ranges within your network (if any)
- # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
- denied-peer-ip=10.0.0.0-10.255.255.255
- denied-peer-ip=192.168.0.0-192.168.255.255
- denied-peer-ip=172.16.0.0-172.31.255.255
-
- # recommended additional local peers to block, to mitigate external access to internal services.
- # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
- no-multicast-peers
- denied-peer-ip=0.0.0.0-0.255.255.255
- denied-peer-ip=100.64.0.0-100.127.255.255
- denied-peer-ip=127.0.0.0-127.255.255.255
- denied-peer-ip=169.254.0.0-169.254.255.255
- denied-peer-ip=192.0.0.0-192.0.0.255
- denied-peer-ip=192.0.2.0-192.0.2.255
- denied-peer-ip=192.88.99.0-192.88.99.255
- denied-peer-ip=198.18.0.0-198.19.255.255
- denied-peer-ip=198.51.100.0-198.51.100.255
- denied-peer-ip=203.0.113.0-203.0.113.255
- denied-peer-ip=240.0.0.0-255.255.255.255
-
- # special case the turn server itself so that client->TURN->TURN->client flows work
- # this should be one of the turn server's listening IPs
- allowed-peer-ip=10.0.0.1
-
- # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
- user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
- total-quota=1200
- ```
-
-1. Also consider supporting TLS/DTLS. To do this, add the following settings
- to `turnserver.conf`:
-
- ```
- # TLS certificates, including intermediate certs.
- # For Let's Encrypt certificates, use `fullchain.pem` here.
- cert=/path/to/fullchain.pem
-
- # TLS private key file
- pkey=/path/to/privkey.pem
-
- # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
- #no-tls
- #no-dtls
- ```
-
- In this case, replace the `turn:` schemes in the `turn_uris` settings below
- with `turns:`.
-
- We recommend that you only try to set up TLS/DTLS once you have set up a
- basic installation and got it working.
-
- NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
- not work with any Matrix client that uses Chromium's WebRTC library. This
- currently includes Element Android & iOS; for more details, see their
- [respective](https://github.com/vector-im/element-android/issues/1533)
- [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
- [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
- Consider using a ZeroSSL certificate for your TURN server as a working alternative.
-
-1. Ensure your firewall allows traffic into the TURN server on the ports
- you've configured it to listen on (By default: 3478 and 5349 for TURN
- traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
- for the UDP relay.)
-
-1. If your TURN server is behind NAT, the NAT gateway must have an external,
- publicly-reachable IP address. You must configure coturn to advertise that
- address to connecting clients:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- ```
-
- You may optionally limit the TURN server to listen only on the local
- address that is mapped by NAT to the external address:
-
- ```
- listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
- ```
-
- If your NAT gateway is reachable over both IPv4 and IPv6, you may
- configure coturn to advertise each available address:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- external-ip=EXTERNAL_NAT_IPv6_ADDRESS
- ```
-
- When advertising an external IPv6 address, ensure that the firewall and
- network settings of the system running your TURN server are configured to
- accept IPv6 traffic, and that the TURN server is listening on the local
- IPv6 address that is mapped by NAT to the external IPv6 address.
-
-1. (Re)start the turn server:
-
- * If you used the Debian package (or have set up a systemd unit yourself):
- ```sh
- systemctl restart coturn
- ```
-
- * If you installed from source:
-
- ```sh
- bin/turnserver -o
- ```
+Afterwards, the homeserver needs some further configuration.
## Synapse setup
Your homeserver configuration file needs the following extra keys:
-1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
- for your TURN server to be given out to your clients. Add separate
- entries for each transport your TURN server supports.
-2. "`turn_shared_secret`": This is the secret shared between your
- homeserver and your TURN server, so you should set it to the same
- string you used in turnserver.conf.
-3. "`turn_user_lifetime`": This is the amount of time credentials
- generated by your homeserver are valid for (in milliseconds).
- Shorter times offer less potential for abuse at the expense of
- increased traffic between web clients and your homeserver to
- refresh credentials. The TURN REST API specification recommends
- one day (86400000).
-4. "`turn_allow_guests`": Whether to allow guest users to use the
- TURN server. This is enabled by default, as otherwise VoIP will
- not work reliably for guests. However, it does introduce a
- security risk as it lets guests connect to arbitrary endpoints
- without having gone through a CAPTCHA or similar to register a
- real account.
+1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
+2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
+3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
+4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -263,7 +69,7 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
relay ports (49152-65535 by default).
- * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
+ * Try disabling the TURN server's TLS/DTLS listeners and enabling only its (unencrypted)
TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
voice & video WebRTC traffic is always encrypted.)
@@ -288,12 +94,19 @@ Here are a few things to try:
* ensure that your TURN server uses the NAT gateway as its default route.
- * Enable more verbose logging in coturn via the `verbose` setting:
+ * Enable more verbose logging. In `coturn` this is done via the `verbose` setting:
```
verbose
```
+ In `eturnal`, use the shell command `eturnalctl loglevel debug`, or set `log_level` in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for the change to take effect):
+
+ ```yaml
+ ## Logging configuration:
+ log_level: debug
+ ```
+
... and then see if there are any clues in its logs.
* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
matrix client to your homeserver in your browser's network inspector. In
the response you should see `username` and `password`. Or:
- * Use the following shell commands:
+ * Use the following shell commands for `coturn`:
```sh
secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
echo -e "username: $u\npassword: $p"
```
- Or:
+ or for `eturnal`:
+
+ ```sh
+ eturnalctl credentials
+ ```
+
- * Temporarily configure coturn to accept a static username/password. To do
- this, comment out `use-auth-secret` and `static-auth-secret` and add the
- following:
+ * Or (**coturn only**): Temporarily configure `coturn` to accept a static
+ username/password. To do this, comment out `use-auth-secret` and
+ `static-auth-secret` and add the following:
```
lt-cred-mech
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 47a74b67de..4fe9e4f02e 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -15,9 +15,8 @@ this document.
The website <https://endoflife.date> also offers convenient
summaries.
-- If Synapse was installed using [prebuilt
- packages](setup/installation.md#prebuilt-packages), you will need to follow the
- normal process for upgrading those packages.
+- If Synapse was installed using [prebuilt packages](setup/installation.md#prebuilt-packages),
+ you will need to follow the normal process for upgrading those packages.
- If Synapse was installed using pip then upgrade to the latest
version by running:
@@ -89,6 +88,252 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.73.0
+
+## Legacy Prometheus metric names have now been removed
+
+Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
+and offered an option to disable them.
+Synapse v1.71.0 disabled legacy Prometheus metric names by default.
+
+This version, v1.73.0, removes those legacy Prometheus metric names entirely.
+This also means that the `enable_legacy_metrics` configuration option has been
+removed; it will no longer be possible to re-enable the legacy metric names.
+
+If you use metrics and have not yet updated your Grafana dashboard(s),
+Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
+to this version.
+Note that the included Grafana dashboard was updated in v1.72.0 to correct some
+metric names which were missed when legacy metrics were disabled by default.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
+for more context.
+
+
+# Upgrading to v1.72.0
+
+## Dropping support for PostgreSQL 10
+
+In line with our [deprecation policy](deprecation_policy.md), we've dropped
+support for PostgreSQL 10, as it is no longer supported upstream.
+
+This release of Synapse requires PostgreSQL 11+.
+
+
+# Upgrading to v1.71.0
+
+## Removal of the `generate_short_term_login_token` module API method
+
+As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.
+
+Modules relying on it can instead use the `create_login_token` method.
+
+
+## Changes to the events received by application services (interest)
+
+To align with spec (changed in
+[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
+only considers local users to be interesting. In other words, the `users` namespace
+regex is only applied against local users of the homeserver.
+
+Please note that this probably doesn't affect the expected behavior of your application
+service, since an interesting local user in a room still means all messages in the room
+(from local or remote users) will still be considered interesting. And matching a room
+with the `rooms` or `aliases` namespace regex will still consider all events sent in the
+room to be interesting to the application service.
+
+If one of your application service's `users` regexes was intended to match a remote user,
+it will no longer match as you expect. The behavioral mismatch between matching all
+local users and some remote users is why the spec was changed/clarified and this
+caveat is no longer supported.
+
+
+## Legacy Prometheus metric names are now disabled by default
+
+Synapse v1.71.0 disables legacy Prometheus metric names by default.
+For administrators that still rely on them and have not yet had a chance to update their
+uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
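+
+A minimal sketch of the temporary configuration while dashboards and alerts are migrated:
+
+```yaml
+# Remove this again once dashboards and alerts use the new metric names;
+# the option itself is removed in v1.73.0.
+enable_legacy_metrics: true
+```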
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
+it will no longer be possible to re-enable them.
+
+If you do not use metrics or you have already updated your Grafana dashboard(s),
+Prometheus console(s) and alerting rule(s), there is no action needed.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).
+
+
+# Upgrading to v1.69.0
+
+## Changes to the receipts replication streams
+
+Synapse now includes information indicating if a receipt applies to a thread when
+replicating it to other workers. This is a forwards- and backwards-incompatible
+change: v1.68 workers cannot process receipts replicated by v1.69 workers, and
+vice versa.
+
+Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
+replication will resume as normal.
+
+
+## Deprecation of legacy Prometheus metric names
+
+In current versions of Synapse, some Prometheus metrics are emitted under two different names,
+with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
+and one of the names being newer but compliant.
+
+Synapse v1.71.0 will turn the old metric names off *by default*.
+For administrators that still rely on them and have not had a chance to update their
+uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
+be possible to re-enable them.
+
+The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
+in the `contrib` directory in the Synapse repository have been updated to no longer
+rely on the legacy names. These can be used on a current version of Synapse
+because current versions of Synapse emit both old and new names.
+
+You may need to update your alerting rules or any other rules that depend on
+the names of Prometheus metrics.
+If you want to test your changes before legacy names are disabled by default,
+you may specify `enable_legacy_metrics: false` in your homeserver configuration.
+
+A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).
+
+
+## Deprecation of the `generate_short_term_login_token` module API method
+
+The following method of the module API has been deprecated, and is scheduled to
+be removed in v1.71.0:
+
+```python
+def generate_short_term_login_token(
+ self,
+ user_id: str,
+ duration_in_ms: int = (2 * 60 * 1000),
+ auth_provider_id: str = "",
+ auth_provider_session_id: Optional[str] = None,
+) -> str:
+ ...
+```
+
+It has been replaced by an asynchronous equivalent:
+
+```python
+async def create_login_token(
+ self,
+ user_id: str,
+ duration_in_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+) -> str:
+ ...
+```
+
+Synapse will log a warning when a module uses the deprecated method, to help
+administrators find modules using it.
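+
+As a rough sketch of the migration for module authors (assuming the module keeps a
+reference to the module API as `self.api`; note that the replacement is asynchronous
+and must be awaited):
+
+```python
+async def issue_login_token(self, user_id: str) -> str:
+    # Before (deprecated, removed in v1.71.0):
+    #   return self.api.generate_short_term_login_token(user_id)
+    # After, using the asynchronous replacement:
+    return await self.api.create_login_token(user_id)
+```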
+
+
+# Upgrading to v1.68.0
+
+Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.
+
+## SQLite version requirement
+
+Synapse now requires a SQLite version of 3.27.0 or higher if SQLite is configured as
+Synapse's database.
+
+Installations using
+
+- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
+- Debian packages [from Matrix.org](https://packages.matrix.org/), or
+- a PostgreSQL database
+
+are not affected.
+
+## Rust requirement when building from source
+
+Building from a source checkout of Synapse now requires a recent Rust compiler
+(currently Rust 1.58.1, but see also the
+[Platform Dependency Policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html)).
+
+Installations using
+
+- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
+- Debian packages [from Matrix.org](https://packages.matrix.org/), or
+- PyPI wheels via `pip install matrix-synapse` (on supported platforms and architectures)
+
+will not be affected.
+
+# Upgrading to v1.67.0
+
+## Direct TCP replication is no longer supported: migrate to Redis
+
+Redis support was added in v1.13.0 with it becoming the recommended method in
+v1.18.0. It replaced the old direct TCP connections (which were deprecated as of
+v1.18.0) to the main process. With Redis, rather than all the workers connecting
+to the main process, all the workers and the main process connect to Redis,
+which relays replication commands between processes. This can give a significant
+CPU saving on the main process and is a prerequisite for upcoming
+performance improvements.
+
+To migrate to Redis add the [`redis` config](./workers.md#shared-configuration),
+remove the TCP `replication` listener from the main process's config, and remove
+`worker_replication_port` from the worker config. Note that an HTTP listener with a
+`replication` resource is still required.
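+
+A minimal sketch of the shared configuration, assuming a Redis instance running on
+the same host with the default port:
+
+```yaml
+redis:
+  enabled: true
+  host: localhost
+  port: 6379
+```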
+
+## Minimum version of Poetry is now v1.2.0
+
+The minimum supported version of poetry is now 1.2. This should only affect
+those installing from a source checkout.
+
+## Rust requirement in the next release
+
+From the next major release (v1.68.0) installing Synapse from a source checkout
+will require a recent Rust compiler. Those using packages or
+`pip install matrix-synapse` will not be affected.
+
+The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/).
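+
+For example, on most Linux and macOS systems (a sketch; check rustup.rs for the
+current installer invocation):
+
+```sh
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+source "$HOME/.cargo/env"
+rustc --version   # should report 1.58.1 or newer
+```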
+
+## SQLite version requirement in the next release
+
+From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
+higher. Synapse v1.67.0 will be the last major release supporting SQLite
+versions 3.22 to 3.26.
+
+Those using Docker images or Debian packages from Matrix.org will not be
+affected. If you have installed from source, you should check the version of
+SQLite used by Python with:
+
+```shell
+python -c "import sqlite3; print(sqlite3.sqlite_version)"
+```
+
+If this is too old, refer to your distribution for advice on upgrading.
+
+
+# Upgrading to v1.66.0
+
+## Delegation of email validation no longer supported
+
+As of this version, Synapse no longer allows the tasks of verifying email address
+ownership, and password reset confirmation, to be delegated to an identity server.
+This removal was previously planned for Synapse 1.64.0, but was
+[delayed](https://github.com/matrix-org/synapse/issues/13421) until now to give
+homeserver administrators more notice of the change.
+
+To continue to allow users to add email addresses to their homeserver accounts,
+and perform password resets, make sure that Synapse is configured with a working
+email server in the [`email` configuration
+section](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
+(including, at a minimum, a `notif_from` setting.)
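+
+A minimal sketch of such a configuration (hostnames and credentials are
+illustrative; see the linked documentation for the full set of options):
+
+```yaml
+email:
+  smtp_host: mail.example.com
+  smtp_port: 587
+  smtp_user: synapse
+  smtp_pass: secretpassword
+  notif_from: "Your %(app)s homeserver <noreply@example.com>"
+```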
+
+Specifying an `email` setting under `account_threepid_delegates` will now cause
+an error at startup.
+
# Upgrading to v1.64.0
## Deprecation of the ability to delegate e-mail verification to identity servers
@@ -1181,7 +1426,7 @@ updated.
When setting up worker processes, we now recommend the use of a Redis
server for replication. **The old direct TCP connection method is
deprecated and will be removed in a future release.** See
-[workers](workers.md) for more details.
+the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details.
# Upgrading to v1.14.0
diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md
index f11e0b19a6..c00de2dd44 100644
--- a/docs/usage/administration/admin_api/README.md
+++ b/docs/usage/administration/admin_api/README.md
@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
## Making an Admin API request
-For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
+For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.
diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md
index 13d5eb75e9..90cbc21125 100644
--- a/docs/usage/administration/admin_api/registration_tokens.md
+++ b/docs/usage/administration/admin_api/registration_tokens.md
@@ -2,11 +2,11 @@
This API allows you to manage tokens which can be used to authenticate
registration requests, as proposed in
-[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md).
+[MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md)
+and stabilised in version 1.2 of the Matrix specification.
To use it, you will need to enable the `registration_requires_token` config
option, and authenticate by providing an `access_token` for a server admin:
-see [Admin API](../../usage/administration/admin_api).
-Note that this API is still experimental; not all clients may support it yet.
+see [Admin API](../admin_api).
## Registration token objects
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 3dcad4bbef..7ba5a83f04 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -2,9 +2,9 @@
How do I become a server admin?
---
-If your server already has an admin account you should use the user admin API to promote other accounts to become admins. See [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not)
+If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#Change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins.
-If you don't have any admin accounts yet you won't be able to use the admin API so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account, use the admin APIs to make further changes.
+If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended, so once you have an admin account, use the admin APIs to make further changes.
```sql
UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
@@ -32,9 +32,11 @@ What users are registered on my server?
SELECT NAME from users;
```
-Manually resetting passwords:
+Manually resetting passwords
---
-See https://github.com/matrix-org/synapse/blob/master/README.rst#password-reset
+Users can reset their password through their client. Alternatively, a server admin
+can reset a user's password using the [admin API](../../admin_api/user_admin_api.md#reset-password).
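+
+For example, a sketch of such a request (the access token, user ID and new password
+are placeholders):
+
+```sh
+curl --request POST \
+    --header "Authorization: Bearer <admin_access_token>" \
+    --data '{"new_password": "NewSecurePassword", "logout_devices": true}' \
+    http://localhost:8008/_synapse/admin/v1/reset_password/@user:example.com
+```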
+
I have a problem with my server. Can I just delete my database and start again?
---
@@ -101,3 +103,83 @@ LIMIT 10;
You can also use the [List Room API](../../admin_api/rooms.md#list-room-api)
and `order_by` `state_events`.
+
+
+People can't accept room invitations from me
+---
+
+The typical failure mode here is that you send an invitation to someone
+to join a room or direct chat, but when they go to accept it, they get an
+error (typically along the lines of "Invalid signature"). They might see
+something like the following in their logs:
+
+ 2019-09-11 19:32:04,271 - synapse.federation.transport.server - 288 - WARNING - GET-11752 - authenticate_request failed: 401: Invalid signature for server <server> with key ed25519:a_EqML: Unable to verify signature for <server>
+
+This is normally caused by a misconfiguration in your reverse-proxy. See [the reverse proxy docs](../../reverse_proxy.md) and double-check that your settings are correct.
+
+
+Help!! Synapse is slow and eats all my RAM/CPU!
+-----------------------------------------------
+
+First, ensure you are running the latest version of Synapse, using Python 3
+with a [PostgreSQL database](../../postgres.md).
+
+Synapse's architecture is quite RAM hungry currently - we deliberately
+cache a lot of recent room data and metadata in RAM in order to speed up
+common requests. We'll improve this in the future, but for now the easiest
+way to reduce the RAM usage (at the risk of slowing things down)
+is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
+variable. The default is 0.5, which can be decreased to reduce RAM usage
+in memory-constrained environments, or increased if performance starts to
+degrade.
+
+However, degraded performance due to a low cache factor, common on
+machines with slow disks, often leads to explosions in memory use due to
+backlogged requests. In this case, reducing the cache factor will make
+things worse. Instead, try increasing it drastically. 2.0 is a good
+starting value.
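+
+For example, on a Debian package install this can be done by adding the following
+line to `/etc/default/matrix-synapse` (a sketch; other installs can export the
+variable in whatever environment launches Synapse):
+
+    SYNAPSE_CACHE_FACTOR=2.0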
+
+Using [libjemalloc](https://jemalloc.net) can also yield a significant
+improvement in overall memory use, and especially in terms of giving back
+RAM to the OS. To use it, the library must simply be put in the
+LD_PRELOAD environment variable when launching Synapse. On Debian, this
+can be done by installing the `libjemalloc1` package and adding this
+line to `/etc/default/matrix-synapse`:
+
+ LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+
+This made a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
+
+If you're encountering high CPU use by the Synapse process itself, you
+may be affected by a bug with presence tracking that leads to a
+massive excess of outgoing federation requests (see [discussion](https://github.com/matrix-org/synapse/issues/3971)). If metrics
+indicate that your server is also issuing far more outgoing federation
+requests than can be accounted for by your users' activity, this is a
+likely cause. The misbehavior can be worked around by disabling presence
+in the Synapse config file: [see here](../configuration/config_documentation.md#presence).
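+
+A minimal sketch of the corresponding `homeserver.yaml` snippet:
+
+```yaml
+presence:
+  enabled: false
+```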
+
+
+Running out of File Handles
+---------------------------
+
+If Synapse runs out of file handles, it typically fails badly - live-locking
+at 100% CPU, and/or failing to accept new TCP connections (blocking the
+connecting client). Matrix currently can legitimately use a lot of file handles,
+thanks to busy rooms like `#matrix:matrix.org` containing hundreds of participating
+servers. The first time a server talks in a room it will try to connect
+simultaneously to all participating servers, which could exhaust the available
+file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
+to respond. (We need to improve the routing algorithm to do better than a
+full mesh, but as of March 2019 this hasn't happened yet.)
+
+If you hit this failure mode, we recommend increasing the maximum number of
+open file handles to be at least 4096 (assuming a default of 1024 or 256).
+This is typically done by editing ``/etc/security/limits.conf``.
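+
+For example, assuming Synapse runs as the `matrix-synapse` user, lines along these
+lines raise the limit (a sketch; systemd-managed installs may instead need
+`LimitNOFILE=` in the service unit):
+
+    matrix-synapse  soft  nofile  4096
+    matrix-synapse  hard  nofile  4096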
+
+Separately, Synapse may leak file handles if inbound HTTP requests get stuck
+during processing - e.g. blocked behind a lock or talking to a remote server etc.
+This is best diagnosed by matching up the 'Received request' and 'Processed request'
+log lines and looking for any 'Processed request' lines which take more than
+a few seconds to execute. Please let us know at [`#synapse:matrix.org`](https://matrix.to/#/#synapse:matrix.org) if
+you see this failure mode, so we can help debug it.
diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md
new file mode 100644
index 0000000000..b1da6f17c2
--- /dev/null
+++ b/docs/usage/administration/monthly_active_users.md
@@ -0,0 +1,84 @@
+# Monthly Active Users
+
+Synapse can be configured to record the number of monthly active users (also referred to as MAU) on a given homeserver.
+For clarity's sake, MAU only tracks local users.
+
+Please note that the metrics recorded by the [Homeserver Usage Stats](../../usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
+are calculated differently. The `monthly_active_users` from the usage stats does not take into account any
+of the rules below, and counts any users who have made a request to the homeserver in the last 30 days.
+
+See the [configuration manual](../../usage/configuration/config_documentation.md#limit_usage_by_mau) for details on how to configure MAU.
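+
+A sketch of a typical configuration (values are illustrative; the options are
+described in the linked manual):
+
+```yaml
+limit_usage_by_mau: true
+max_mau_value: 500
+mau_trial_days: 3
+```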
+
+## Calculating active users
+
+Individual user activity is measured in active days. If a user performs an action, the exact time of that action is then recorded. When
+calculating the MAU figure, any users with a recorded action in the last 30 days are considered part of the cohort. Days are measured
+as a rolling window from the current system time to 30 days ago.
+
+So for example, if Synapse were to calculate the active users on the 15th July at 13:25, it would include any activity from 15th June 13:25 onwards.
+
+A user is **never** considered active if they are either:
+ - Part of the trial day cohort (described below)
+ - Owned by an application service.
+ - Note: This **only** covers users that are part of an application service `namespaces.users` registration. The namespace
+ must also be marked as `exclusive`.
+
+Otherwise, any request to Synapse will mark the user as active. Please note that registration will not mark a user as active *unless*
+they register with a 3pid that is included in the config field `mau_limits_reserved_threepids`.
+
+The Prometheus metric for MAU is refreshed every 5 minutes.
+
+Once an hour, Synapse checks whether any users have become inactive (i.e. have no activity recorded within the last 30 days). These users
+are removed from the active users cohort. If they then become active, they are immediately restored to the cohort.
+
+It is important to note that **deactivated** users are not immediately removed from the pool of active users, but as these users won't
+perform actions they will eventually be removed from the cohort.
+
+### Trial days
+
+If the config option `mau_trial_days` is set, a user must have been active this many days **after** registration to be counted as active. A user is in the
+trial period if their registration timestamp (also known as the `creation_ts`) is less than `mau_trial_days` old.
+
+As an example, if `mau_trial_days` is set to `3` and a user is active **after** 3 days (72 hours from registration time) then they will be counted as active.
+
+The `mau_appservice_trial_days` config further extends this rule by applying different durations depending on the `appservice_id` of the user.
+Users registered by an application service will be recorded with an `appservice_id` matching the `id` key in the registration file for that service.
+
+
+## Limiting usage of the homeserver when the maximum MAU is reached
+
+If both config options `limit_usage_by_mau` and `max_mau_value` are set, and the current MAU value exceeds the maximum value, the
+homeserver will begin to block some actions.
+
+Individual users matching **any** of the below criteria never have their actions blocked:
+ - Considered part of the cohort of MAU users.
+ - Considered part of the trial period.
+ - Registered as a `support` user.
+ - Application service users if `track_appservice_user_ips` is NOT set.
+
+Please note that server admins are **not** exempt from blocking.
+
+The following actions are blocked when the MAU limit is exceeded:
+ - Logging in
+ - Sending events
+ - Creating rooms
+ - Syncing
+
+Registration is also blocked for all new signups *unless* the user is registering with a threepid included in the `mau_limits_reserved_threepids`
+config value.
+
+When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMIT_EXCEEDED`.
+
+## Metrics
+
+Synapse records several different Prometheus metrics for MAU.
+
+`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.
+
+`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.
+
+`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
+to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.
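+
+For example, a Prometheus query restricted to native users might look like the
+following (a sketch; label values depend on your registered application services):
+
+```
+synapse_admin_mau_current_mau_by_service{app_service="native"}
+```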
+
+`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
+registered accounts on the homeserver.
diff --git a/docs/usage/administration/request_log.md b/docs/usage/administration/request_log.md
index adb5f4f5f3..82f5ac7b96 100644
--- a/docs/usage/administration/request_log.md
+++ b/docs/usage/administration/request_log.md
@@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default lo
| Part | Explanation |
| ----- | ------------ |
-| AAAA | Timestamp request was logged (not recieved) |
+| AAAA | Timestamp request was logged (not received) |
| BBBB | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
| CCCC | Line number in code |
| DDDD | Log Level |
| EEEE | Request Identifier (This identifier is shared by related log lines)|
| FFFF | Source IP (Or X-Forwarded-For if enabled) |
| GGGG | Server Port |
-| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied) |
+| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied).<br/>If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). |
| IIII | Total Time to process the request |
| JJJJ | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)|
| KKKK | Userland CPU time |
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 3a9466a837..749af12aac 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -99,7 +99,7 @@ modules:
config: {}
```
---
-## Server ##
+## Server
Define your homeserver name and other base options.
@@ -159,7 +159,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-'listeners' below).
+['listeners'](#listeners) below).
Defaults to `https://<server_name>/`.
@@ -179,7 +179,7 @@ This will tell other servers to send traffic to port 443 instead.
This option currently defaults to false.
-See https://matrix-org.github.io/synapse/latest/delegate.html for more
+See [Delegation of incoming federation traffic](../../delegate.md) for more
information.
Example configuration:
@@ -431,12 +431,19 @@ Sub-options for each listener include:
* `metrics`: (see the docs [here](../../metrics-howto.md)),
- * `replication`: (see the docs [here](../../workers.md)).
-
* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.
* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
- behind a reverse-proxy.
+ behind a [reverse-proxy](../../reverse_proxy.md).
+
+* `request_id_header`: The header extracted from each incoming request that is
+ used as the basis for the request ID. The request ID is used in
+ [logs](../administration/request_log.md#request-log-format) and tracing to
+ correlate and match up requests. When unset, Synapse will automatically
+ generate sequential request IDs. This option is useful when Synapse is behind
+ a [reverse-proxy](../../reverse_proxy.md).
+
+ _Added in Synapse 1.68.0._
* `resources`: Only valid for an 'http' listener. A list of resources to host
on this port. Sub-options for each resource are:
@@ -444,7 +451,7 @@ Sub-options for each listener include:
* `names`: a list of names of HTTP resources. See below for a list of valid resource names.
* `compress`: set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the
- `client`, `consent` and `metrics` resources.
+ `client`, `consent`, `metrics` and `federation` resources.
* `additional_resources`: Only valid for an 'http' listener. A map of
additional endpoints which should be loaded via dynamic modules.
@@ -563,7 +570,7 @@ Example configuration:
delete_stale_devices_after: 1y
```
-## Homeserver blocking ##
+## Homeserver blocking
Useful options for Synapse admins.
---
@@ -595,6 +602,8 @@ server owner wants to limit to the number of monthly active users. When enabled
reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`.
Defaults to false. If this is enabled, a value for `max_mau_value` must also be set.
+See [Monthly Active Users](../administration/monthly_active_users.md) for details on how to configure MAU.
+
Example configuration:
```yaml
limit_usage_by_mau: true
@@ -759,6 +768,10 @@ allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
How long to keep redacted events in unredacted form in the database. After
this period redacted events get replaced with their redacted form in the DB.
+Synapse will check whether the retention period has concluded for redacted
+events every 5 minutes. Thus, even if this option is set to `0`, Synapse may
+still take up to 5 minutes to purge redacted events from the database.
+
Defaults to `7d`. Set to `null` to disable.
Example configuration:
@@ -845,7 +858,11 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
-The message retention policies feature is disabled by default.
+The message retention policies feature is disabled by default. Please be advised
+that enabling this feature carries some risk. There are known bugs with the implementation
+which can cause database corruption. Setting retention to delete older history
+is less risky than deleting newer history but in general caution is advised when enabling this
+experimental feature. You can read more about this feature [here](../../message_retention_policies.md).
This setting has the following sub-options:
* `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the
@@ -905,7 +922,7 @@ retention:
interval: 1d
```
---
-## TLS ##
+## TLS
Options related to TLS.
@@ -995,7 +1012,7 @@ federation_custom_ca_list:
- myCA3.pem
```
---
-## Federation ##
+## Federation
Options related to federation.
@@ -1054,28 +1071,30 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
-## Caching ##
+## Caching
-Options related to caching
+Options related to caching.
---
### `event_cache_size`
-The number of events to cache in memory. Not affected by
-`caches.global_factor`. Defaults to 10K.
+The number of events to cache in memory. Defaults to 10K. Like other caches,
+this is affected by `caches.global_factor` (see below).
+
+Note that this option is not part of the `caches` section.
Example configuration:
```yaml
event_cache_size: 15K
```
---
-### `cache` and associated values
+### `caches` and associated values
A cache 'factor' is a multiplier that can be applied to each of
Synapse's caches in order to increase or decrease the maximum
number of entries that can be stored.
-Caching can be configured through the following sub-options:
+`caches` can be configured through the following sub-options:
* `global_factor`: Controls the global cache factor, which is the default cache factor
for all caches if a specific factor for that cache is not otherwise
@@ -1120,7 +1139,7 @@ Caching can be configured through the following sub-options:
* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and
`min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory
- usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu)
+ usage and cache entry availability. You must be using [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu)
to utilize this option, and all three of the options must be specified for this feature to work. This option
defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work
and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided.
@@ -1137,6 +1156,7 @@ Caching can be configured through the following sub-options:
Example configuration:
```yaml
+event_cache_size: 15K
caches:
global_factor: 1.0
per_cache_factors:
@@ -1165,7 +1185,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.
---
-## Database ##
+## Database
Config options related to database settings.
---
@@ -1312,20 +1332,21 @@ databases:
cp_max: 10
```
---
-## Logging ##
+## Logging
Config options related to logging.
---
### `log_config`
-This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
-## Ratelimiting ##
+## Ratelimiting
Options related to ratelimiting in Synapse.
Each ratelimiting configuration is made of two parameters:
@@ -1382,7 +1403,7 @@ This option specifies several limits for login:
client is attempting to log into. Defaults to `per_second: 0.17`,
`burst_count: 3`.
-* `failted_attempts` ratelimits login requests based on the account the
+* `failed_attempts` ratelimits login requests based on the account the
client is attempting to log into, based on the amount of failed login
attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`.
@@ -1556,7 +1577,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
-## Media Store ##
+## Media Store
Config options related to Synapse's media store.
---
@@ -1746,7 +1767,7 @@ url_preview_ip_range_blacklist:
- 'ff00::/8'
- 'fec0::/10'
```
-----
+---
### `url_preview_ip_range_whitelist`
This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1840,7 +1861,7 @@ Example configuration:
- 'fr;q=0.8'
- '*;q=0.7'
```
-----
+---
### `oembed`
oEmbed allows for easier embedding content from a website. It can be
@@ -1857,15 +1878,15 @@ oembed:
- oembed/my_providers.json
```
---
-## Captcha ##
+## Captcha
See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
---
### `recaptcha_public_key`
-This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is
-enabled.
+This homeserver's ReCAPTCHA public key. Must be specified if
+[`enable_registration_captcha`](#enable_registration_captcha) is enabled.
Example configuration:
```yaml
@@ -1874,7 +1895,8 @@ recaptcha_public_key: "YOUR_PUBLIC_KEY"
---
### `recaptcha_private_key`
-This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is
+This homeserver's ReCAPTCHA private key. Must be specified if
+[`enable_registration_captcha`](#enable_registration_captcha) is
enabled.
Example configuration:
@@ -1884,9 +1906,11 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY"
---
### `enable_registration_captcha`
-Set to true to enable ReCaptcha checks when registering, preventing signup
-unless a captcha is answered. Requires a valid ReCaptcha public/private key.
-Defaults to false.
+Set to `true` to require users to complete a CAPTCHA test when registering an account.
+Requires a valid ReCaptcha public/private key.
+Defaults to `false`.
+
+Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@@ -1903,7 +1927,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
-## TURN ##
+## TURN
Options related to adding a TURN server to Synapse.
---
@@ -1924,7 +1948,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
-----
+---
### `turn_username` and `turn_password`
The Username and password if the TURN server needs them and does not use a token.
@@ -1962,98 +1986,43 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat
---
### `enable_registration`
-Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration,
-you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
-without any verification, you must also set `enable_registration_without_verification` to true.
-
-Example configuration:
-```yaml
-enable_registration: true
-```
----
-### `enable_registration_without_verification`
-Enable registration without email or captcha verification. Note: this option is *not* recommended,
-as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
-unless `enable_registration` is also enabled.
-
-Example configuration:
-```yaml
-enable_registration_without_verification: true
-```
----
-### `session_lifetime`
-
-Time that a user's session remains valid for, after they log in.
-
-Note that this is not currently compatible with guest logins.
-
-Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
-logged in.
-
-By default, this is infinite.
-
-Example configuration:
-```yaml
-session_lifetime: 24h
-```
-----
-### `refresh_access_token_lifetime`
-
-Time that an access token remains valid for, if the session is using refresh tokens.
-
-For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
-
-Note that this only applies to clients which advertise support for refresh tokens.
-
-Note also that this is calculated at login time and refresh time: changes are not applied to
-existing sessions until they are refreshed.
-
-By default, this is 5 minutes.
+Enable registration for new users. Defaults to `false`.
-Example configuration:
-```yaml
-refreshable_access_token_lifetime: 10m
-```
----
-### `refresh_token_lifetime: 24h`
+It is highly recommended that if you enable registration, you set one or more
+of the following options, to avoid abuse of your server by "bots":
-Time that a refresh token remains valid for (provided that it is not
-exchanged for another one first).
-This option can be used to automatically log-out inactive sessions.
-Please see the manual for more information.
+ * [`enable_registration_captcha`](#enable_registration_captcha)
+ * [`registrations_require_3pid`](#registrations_require_3pid)
+ * [`registration_requires_token`](#registration_requires_token)
-Note also that this is calculated at login time and refresh time:
-changes are not applied to existing sessions until they are refreshed.
+(In order to enable registration without any verification, you must also set
+[`enable_registration_without_verification`](#enable_registration_without_verification).)
-By default, this is infinite.
+Note that even if this setting is disabled, new accounts can still be created
+via the admin API if
+[`registration_shared_secret`](#registration_shared_secret) is set.
Example configuration:
```yaml
-refresh_token_lifetime: 24h
+enable_registration: true
```
---
-### `nonrefreshable_access_token_lifetime`
-
-Time that an access token remains valid for, if the session is NOT
-using refresh tokens.
-
-Please note that not all clients support refresh tokens, so setting
-this to a short value may be inconvenient for some users who will
-then be logged out frequently.
-
-Note also that this is calculated at login time: changes are not applied
-retrospectively to existing sessions for users that have already logged in.
+### `enable_registration_without_verification`
-By default, this is infinite.
+Enable registration without email or captcha verification. Note: this option is *not* recommended,
+as registration without verification is a known vector for spam and abuse. Defaults to `false`. Has no effect
+unless [`enable_registration`](#enable_registration) is also enabled.
Example configuration:
```yaml
-nonrefreshable_access_token_lifetime: 24h
+enable_registration_without_verification: true
```
---
### `registrations_require_3pid`
-If this is set, the user must provide all of the specified types of 3PID when registering.
+If this is set, users must provide all of the specified types of 3PID when registering an account.
+
+Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@@ -2101,9 +2070,11 @@ enable_3pid_lookup: false
Require users to submit a token during registration.
Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md).
-Note that `enable_registration` must be set to true.
Disabling this option will not delete any tokens previously generated.
-Defaults to false. Set to true to enable.
+Defaults to `false`. Set to `true` to enable.
+
+
+Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@@ -2112,13 +2083,39 @@ registration_requires_token: true
---
### `registration_shared_secret`
-If set, allows registration of standard or admin accounts by anyone who
-has the shared secret, even if registration is otherwise disabled.
+If set, allows registration of standard or admin accounts by anyone who has the
+shared secret, even if [`enable_registration`](#enable_registration) is not
+set.
+
+This is primarily intended for use with the `register_new_matrix_user` script
+(see [Registering a user](../../setup/installation.md#registering-a-user));
+however, the interface is [documented](../../admin_api/register_api.html).
+
+See also [`registration_shared_secret_path`](#registration_shared_secret_path).
Example configuration:
```yaml
registration_shared_secret: <PRIVATE STRING>
```
+
+---
+### `registration_shared_secret_path`
+
+An alternative to [`registration_shared_secret`](#registration_shared_secret):
+allows the shared secret to be specified in an external file.
+
+The file should be a plain text file, containing only the shared secret.
+
+If this file does not exist, Synapse will create a new shared secret on
+startup and store it in this file.
+
+Example configuration:
+```yaml
+registration_shared_secret_path: /path/to/secrets/file
+```
+
+_Added in Synapse 1.67.0._
+
---
### `bcrypt_rounds`
@@ -2173,7 +2170,10 @@ their account.
by the Matrix Identity Service API
[specification](https://matrix.org/docs/spec/identity_service/latest).)
-*Updated in Synapse 1.64.0*: The `email` option is deprecated.
+*Deprecated in Synapse 1.64.0*: The `email` option is deprecated.
+
+*Removed in Synapse 1.66.0*: The `email` option has been removed.
+If present, Synapse will report a configuration error on startup.
Example configuration:
```yaml
@@ -2230,6 +2230,9 @@ homeserver. If the room already exists, make certain it is a publicly joinable
room, i.e. the join rule of the room must be set to 'public'. You can find more options
relating to auto-joining rooms below.
+As Spaces are just rooms under the hood, Space aliases may also be
+used.
+
Example configuration:
```yaml
auto_join_rooms:
@@ -2241,7 +2244,7 @@ auto_join_rooms:
Where `auto_join_rooms` are specified, setting this flag ensures that
the rooms exist by creating them when the first user on the
-homeserver registers.
+homeserver registers. This option will not create Spaces.
By default the auto-created rooms are publicly joinable from any federated
server. Use the `autocreate_auto_join_rooms_federated` and
@@ -2259,7 +2262,7 @@ autocreate_auto_join_rooms: false
---
### `autocreate_auto_join_rooms_federated`
-Whether the rooms listen in `auto_join_rooms` that are auto-created are available
+Whether the rooms listed in `auto_join_rooms` that are auto-created are available
via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
Note that whether a room is federated cannot be modified after
@@ -2347,7 +2350,80 @@ Example configuration:
inhibit_user_in_use_error: true
```
---
-## Metrics ###
+## User session management
+---
+### `session_lifetime`
+
+Time that a user's session remains valid for, after they log in.
+
+Note that this is not currently compatible with guest logins.
+
+Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
+logged in.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+session_lifetime: 24h
+```
+---
+### `refresh_access_token_lifetime`
+
+Time that an access token remains valid for, if the session is using refresh tokens.
+
+For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
+
+Note that this only applies to clients which advertise support for refresh tokens.
+
+Note also that this is calculated at login time and refresh time: changes are not applied to
+existing sessions until they are refreshed.
+
+By default, this is 5 minutes.
+
+Example configuration:
+```yaml
+refreshable_access_token_lifetime: 10m
+```
+---
+### `refresh_token_lifetime`
+
+Time that a refresh token remains valid for (provided that it is not
+exchanged for another one first).
+This option can be used to automatically log-out inactive sessions.
+Please see the manual for more information.
+
+Note also that this is calculated at login time and refresh time:
+changes are not applied to existing sessions until they are refreshed.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+refresh_token_lifetime: 24h
+```
+---
+### `nonrefreshable_access_token_lifetime`
+
+Time that an access token remains valid for, if the session is NOT
+using refresh tokens.
+
+Please note that not all clients support refresh tokens, so setting
+this to a short value may be inconvenient for some users who will
+then be logged out frequently.
+
+Note also that this is calculated at login time: changes are not applied
+retrospectively to existing sessions for users that have already logged in.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+nonrefreshable_access_token_lifetime: 24h
+```
+
+---
+## Metrics
Config options related to metrics.
---
@@ -2419,11 +2495,11 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
-## API Configuration ##
+## API Configuration
Config settings related to the client/server API
---
-### `room_prejoin_state:`
+### `room_prejoin_state`
Controls for the state that is shared with users who receive an invite
to a room. By default, the following state event types are shared with users who
@@ -2519,13 +2595,16 @@ Example configuration:
form_secret: <PRIVATE STRING>
```
---
-## Signing Keys ##
+## Signing Keys
Config options relating to signing keys
---
### `signing_key_path`
-Path to the signing key to sign messages with.
+Path to the signing key to sign events and federation requests with.
+
+*New in Synapse 1.67*: If this file does not exist, Synapse will create a new signing
+key on startup and store it in this file.
Example configuration:
```yaml
@@ -2560,7 +2639,7 @@ Example configuration:
key_refresh_interval: 2d
```
---
-### `trusted_key_servers:`
+### `trusted_key_servers`
The trusted servers to download signing keys from.
@@ -2577,6 +2656,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.
+If you need to disable the use of trusted key servers, e.g. in a private
+federation or for privacy reasons, you can do so by setting an empty array
+(`trusted_key_servers: []`). Synapse will then request keys directly from the
+server that owns them. If Synapse cannot get the keys directly from that
+server, that server's events will be rejected.
+
Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2625,18 +2710,15 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
-## Single sign-on integration ##
+## Single sign-on integration
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
-You will probably also want to set the following options to false to
+You will probably also want to set the following options to `false` to
disable the regular login/registration flows:
- * `enable_registration`
- * `password_config.enabled`
-
-You will also want to investigate the settings under the "sso" configuration
-section below.
+ * [`enable_registration`](#enable_registration)
+ * [`password_config.enabled`](#password_config)
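+
+A minimal sketch of both settings together:
+
+```yaml
+enable_registration: false
+password_config:
+  enabled: false
+```
+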
---
### `saml2_config`
@@ -2877,7 +2959,7 @@ Options for each entry include:
* `module`: The class name of a custom mapping module. Default is
`synapse.handlers.oidc.JinjaOidcMappingProvider`.
- See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+ See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
for information on implementing a custom mapping provider.
* `config`: Configuration for the mapping provider module. This section will
@@ -2886,10 +2968,17 @@ Options for each entry include:
For the default provider, the following settings are available:
- * subject_claim: name of the claim containing a unique identifier
+ * `subject_claim`: name of the claim containing a unique identifier
for the user. Defaults to 'sub', which OpenID Connect
compliant providers should provide.
+ * `picture_claim`: name of the claim containing a URL for the user's profile picture.
+ Defaults to 'picture', which OpenID Connect compliant providers should provide.
+ The URL must point directly to an image file, such as a PNG, JPEG, or GIF.
+
+ Currently only supported in monolithic (single-process) server configurations
+ where the media repository runs within the Synapse process.
+
* `localpart_template`: Jinja2 template for the localpart of the MXID.
If this is not set, the user will be prompted to choose their
own username (see the documentation for the `sso_auth_account_details.html`
@@ -2914,6 +3003,15 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
+* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
+ Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
+ Defaults to `false`.
+
+* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
+ `sub` claim matches the subject claim received during login. This check can be disabled by setting
+ this to `true`. Defaults to `false`.
+
+ You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.
It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3248,7 +3346,7 @@ email:
email_validation: "[%(server_name)s] Validate your email"
```
---
-## Push ##
+## Push
Configuration settings related to push notifications
---
@@ -3281,11 +3379,11 @@ push:
group_unread_count_by_room: false
```
---
-## Rooms ##
+## Rooms
Config options relating to rooms.
---
-### `encryption_enabled_by_default`
+### `encryption_enabled_by_default_for_room_type`
Controls whether locally-created rooms should be end-to-end encrypted by
default.
@@ -3318,13 +3416,15 @@ This option has the following sub-options:
the user directory. If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
+
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
+
These indexes are built the first time Synapse starts; admins can
- manually trigger a rebuild via API following the instructions at
- https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
- Set to true to return search results containing all known users, even if that
+ manually trigger a rebuild via the API following the instructions
+ [for running background updates](../administration/admin_api/background_updates.md#run).
+ Set to true to return search results containing all known users, even if that
user does not share a room with the requester.
* `prefer_local_users`: Defines whether to prefer local users in search query results.
If set to true, local users are more likely to appear above remote users when searching the
@@ -3343,7 +3443,7 @@ user_directory:
For detailed instructions on user consent configuration, see [here](../../consent_tracking.md).
Parts of this section are required if enabling the `consent` resource under
-`listeners`, in particular `template_dir` and `version`. # TODO: link `listeners`
+[`listeners`](#listeners), in particular `template_dir` and `version`.
* `template_dir`: gives the location of the templates for the HTML forms.
This directory should contain one subdirectory per language (eg, `en`, `fr`),
@@ -3355,7 +3455,7 @@ Parts of this section are required if enabling the `consent` resource under
parameter.
* `server_notice_content`: if enabled, will send a user a "Server Notice"
- asking them to consent to the privacy policy. The `server_notices` section ##TODO: link
+ asking them to consent to the privacy policy. The [`server_notices` section](#server_notices)
must also be configured for this to work. Notices will *not* be sent to
guest users unless `send_server_notice_to_guests` is set to true.
@@ -3439,9 +3539,9 @@ Example configuration:
enable_room_list_search: false
```
---
-### `alias_creation`
+### `alias_creation_rules`
-The `alias_creation` option controls who is allowed to create aliases
+The `alias_creation_rules` option controls who is allowed to create aliases
on this server.
The format of this option is a list of rules that contain globs that
@@ -3525,7 +3625,7 @@ default_power_level_content_override:
```
---
-## Opentracing ##
+## Opentracing
Configuration options related to Opentracing support.
---
@@ -3568,14 +3668,71 @@ opentracing:
false
```
---
-## Workers ##
-Configuration options related to workers.
+## Coordinating workers
+Configuration options related to workers which belong in the main config file
+(usually called `homeserver.yaml`).
+A Synapse deployment can scale horizontally by running multiple Synapse processes
+called _workers_. Incoming requests are distributed between workers to handle higher
+loads. Some workers are privileged and can accept requests from other workers.
+
+As a result, the worker configuration is divided into two parts.
+
+1. The first part (in this section of the manual) defines which shardable tasks
+   are delegated to privileged workers. This allows unprivileged workers to
+   request that a privileged worker act on their behalf.
+1. [The second part](#individual-worker-configuration)
+ controls the behaviour of individual workers in isolation.
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
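+
+As a rough sketch, the worker-related part of the shared configuration might combine
+several of the options described below (all worker names here are illustrative):
+
+```yaml
+worker_replication_secret: "secret_secret"
+
+redis:
+  enabled: true
+
+instance_map:
+  event_persister1:
+    host: 127.0.0.1
+    port: 9091
+
+stream_writers:
+  events: event_persister1
+
+run_background_tasks_on: background_worker
+```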
+
+---
+### `worker_replication_secret`
+
+A shared secret used by the replication APIs on the main process to authenticate
+HTTP requests from workers.
+
+By default, this option is omitted (equivalently `null`), which means that
+traffic between the workers and the main process is not authenticated.
+
+Example configuration:
+```yaml
+worker_replication_secret: "secret_secret"
+```
+---
+### `start_pushers`
+
+Controls sending of push notifications on the main process. Set to `false`
+if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
+
+Example configuration:
+```yaml
+start_pushers: false
+```
+---
+### `pusher_instances`
+It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
+in which case the work is balanced across them. Use this setting to list the pushers by
+[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
+restarted after changing this option.
+
+If only one pusher worker is configured (or none at all), this setting is not necessary.
+The main process will send out push notifications by default if you do not disable
+it by setting [`start_pushers: false`](#start_pushers).
+
+Example configuration:
+```yaml
+start_pushers: false
+pusher_instances:
+ - pusher_worker1
+ - pusher_worker2
+```
---
### `send_federation`
Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
+Defaults to `true`.
Example configuration:
```yaml
@@ -3584,8 +3741,9 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+It is possible to run multiple
+[federation sender workers](../../workers.md#synapseappfederation_sender), in which
+case the work is balanced across them. Use this setting to list the senders.
This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then
@@ -3594,14 +3752,19 @@ events may be dropped).
Example configuration:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
```
---
### `instance_map`
-When using workers this should be a map from worker name to the
+When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+an HTTP replication listener, and that listener should be included in the `instance_map`.
+(The main process also needs an HTTP replication listener, but it should not be
+listed in the `instance_map`.)
Example configuration:
```yaml
@@ -3614,8 +3777,11 @@ instance_map:
### `stream_writers`
Experimental: When using workers you can define which workers should
-handle event persistence and typing notifications. Any worker
-specified here must also be in the `instance_map`.
+handle writing to streams such as event persistence and typing notifications.
+Any worker specified here must also be in the [`instance_map`](#instance_map).
+
+See the list of available streams in the
+[worker documentation](../../workers.md#stream-writers).
Example configuration:
```yaml
@@ -3626,29 +3792,18 @@ stream_writers:
---
### `run_background_tasks_on`
-The worker that is used to run background tasks (e.g. cleaning up expired
-data). If not provided this defaults to the main process.
+The [worker](../../workers.md#background-tasks) that is used to run
+background tasks (e.g. cleaning up expired data). If not provided this
+defaults to the main process.
Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
-### `worker_replication_secret`
-
-A shared secret used by the replication APIs to authenticate HTTP requests
-from workers.
-
-By default this is unused and traffic is not authenticated.
-
-Example configuration:
-```yaml
-worker_replication_secret: "secret_secret"
-```
### `redis`
-Configuration for Redis when using workers. This *must* be enabled when
-using workers (unless using old style direct TCP configuration).
+Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3663,7 +3818,143 @@ redis:
port: 6379
password: <secret_password>
```
-## Background Updates ##
+---
+## Individual worker configuration
+These options configure an individual worker, in its worker configuration file.
+They should not be provided when configuring the main process.
+
+Note also the configuration above for
+[coordinating a cluster of workers](#coordinating-workers).
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_app`
+
+The type of worker. The currently available worker applications are listed
+in [worker documentation](../../workers.md#available-worker-applications).
+
+The most common worker is the
+[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).
+
+Example configuration:
+```yaml
+worker_app: synapse.app.generic_worker
+```
+---
+### `worker_name`
+
+A unique name for the worker. The name is used to address the worker in other
+configuration parameters and to identify it in log files. We strongly recommend
+giving each worker a unique `worker_name`.
+
+Example configuration:
+```yaml
+worker_name: generic_worker1
+```
+---
+### `worker_replication_host`
+
+The host of the HTTP replication endpoint that the worker should talk to on the
+main Synapse process.
+The main Synapse process defines this with a `replication` resource in
+its [`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_host: 127.0.0.1
+```
+---
+### `worker_replication_http_port`
+
+The port of the HTTP replication endpoint that the worker should talk to on the
+main Synapse process.
+The main Synapse process defines this with a `replication` resource in
+its [`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_http_port: 9093
+```
+---
+### `worker_replication_http_tls`
+
+Whether TLS should be used for talking to the HTTP replication port on the main
+Synapse process.
+The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
+has the `replication` resource enabled.
+
+**Please note:** by default, it is not safe to expose replication ports to the
+public Internet, even with TLS enabled.
+See [`worker_replication_secret`](#worker_replication_secret).
+
+Defaults to `false`.
+
+*Added in Synapse 1.72.0.*
+
+Example configuration:
+```yaml
+worker_replication_http_tls: true
+```
+---
+### `worker_listeners`
+
+A worker can handle HTTP requests. To do so, a `worker_listeners` option
+must be declared, in the same way as the [`listeners` option](#listeners)
+in the shared config.
+
+Workers declared in [`stream_writers`](#stream_writers) will need to include a
+`replication` listener here, in order to accept internal HTTP requests from
+other workers.
+
+Example configuration:
+```yaml
+worker_listeners:
+ - type: http
+ port: 8083
+ resources:
+ - names: [client, federation]
+```
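+
+For a worker that is listed in [`stream_writers`](#stream_writers), a sketch of a
+configuration that also exposes a `replication` listener might look like this
+(port numbers are arbitrary):
+
+```yaml
+worker_listeners:
+  - type: http
+    port: 8083
+    resources:
+      - names: [client, federation]
+  - type: http
+    port: 9091
+    resources:
+      - names: [replication]
+```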
+---
+### `worker_daemonize`
+
+Specifies whether the worker should be started as a daemon process.
+If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
+must be omitted or set to `false`.
+
+Defaults to `false`.
+
+Example configuration:
+```yaml
+worker_daemonize: true
+```
+---
+### `worker_pid_file`
+
+When running a worker as a daemon, we need a place to store the
+[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
+This option defines the location of that "pid file".
+
+This option is required if `worker_daemonize` is `true` and ignored
+otherwise. It has no default.
+
+See also the [`pid_file`](#pid_file) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_pid_file: DATADIR/generic_worker1.pid
+```
+---
+### `worker_log_config`
+
+This option specifies a YAML Python logging config file, as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+See also the [`log_config`](#log_config) option for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
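+
+The referenced file uses Python's `dictConfig` schema. A minimal illustrative sketch
+(the formatter, handler name, log path and level are placeholders, not Synapse
+defaults) could look like:
+
+```yaml
+version: 1
+formatters:
+  precise:
+    format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+handlers:
+  file:
+    class: logging.handlers.TimedRotatingFileHandler
+    formatter: precise
+    filename: /var/log/matrix-synapse/generic_worker1.log
+    when: midnight
+    backupCount: 3
+root:
+  level: INFO
+  handlers: [file]
+```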
+---
+## Background Updates
Configuration settings related to background updates.
---
diff --git a/docs/workers.md b/docs/workers.md
index 6969c424d8..27e54c5846 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -32,13 +32,8 @@ stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
-Redis support was added in v1.13.0 with it becoming the recommended method in
-v1.18.0. It replaced the old direct TCP connections (which is deprecated as of
-v1.18.0) to the main process. With Redis, rather than all the workers connecting
-to the main process, all the workers and the main process connect to Redis,
-which relays replication commands between processes. This can give a significant
-cpu saving on the main process and will be a prerequisite for upcoming
-performance improvements.
+All the workers and the main process connect to Redis, which relays replication
+commands between processes.
If Redis support is enabled Synapse will use it as a shared cache, as well as a
pub/sub mechanism.
@@ -93,11 +88,12 @@ shared configuration file.
### Shared configuration
Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an "HTTP replication
-listener" for the main process; and secondly, you need to enable redis-based
-replication. Optionally, a shared secret can be used to authenticate HTTP
-traffic between workers. For example:
-
+file suitable for use with workers. First, you need to enable an
+["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
+for the main process; and secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis).
+Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+can be used to authenticate HTTP traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -117,23 +113,30 @@ redis:
enabled: true
```
-See the sample config for the full documentation of each option.
+See the [configuration manual](usage/configuration/config_documentation.md)
+for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
-public internet; it has no authentication and is unencrypted.
+public internet; replication traffic is:
+
+* always unencrypted
+* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
+ is configured
### Worker configuration
-In the config file for each worker, you must specify the type of worker
-application (`worker_app`), and you should specify a unique name for the worker
-(`worker_name`). The currently available worker applications are listed below.
-You must also specify the HTTP replication endpoint that it should talk to on
-the main synapse process. `worker_replication_host` should specify the host of
-the main synapse and `worker_replication_http_port` should point to the HTTP
-replication port. If the worker will handle HTTP requests then the
-`worker_listeners` option should be set with a `http` listener, in the same way
-as the `listeners` option in the shared config.
+In the config file for each worker, you must specify:
+ * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
+ The currently available worker applications are listed [below](#available-worker-applications).
+ * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
+ * The HTTP replication endpoint that it should talk to on the main synapse process
+ ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+ [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
+ * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
+ with an `http` listener.
+ * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+ the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
@@ -148,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).
-
### Running Synapse with workers
Finally, you need to start your worker processes. This can be done with either
@@ -205,6 +207,8 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/v1/rooms/.*/hierarchy$
+ ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
+ ^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
@@ -221,6 +225,7 @@ information.
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
^/_matrix/client/(r0|v3|unstable)/room_keys/
+ ^/_matrix/client/(r0|v3|unstable)/keys/upload/
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
@@ -285,8 +290,10 @@ For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).
-Note that a HTTP listener with `client` and `federation` resources must be
-configured in the `worker_listeners` option in the worker config.
+Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
+with `client` and `federation` `resources` must be configured in the
+[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
+option in the worker config.
#### Load balancing
@@ -297,9 +304,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting a
-user ID from the access token or `Authorization` header is currently left as an
-exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests from a particular user are routed to a single instance. This can
+be done e.g. in nginx via `hash $http_x_forwarded_for;` (hashing on the client IP
+taken from the `X-Forwarded-For` header) or via `hash $http_authorization consistent;`
+(hashing on the `Authorization` header, which contains the user's access token).
+
+Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests that don't are known as "initial sync" that happens
when a user logs in on a new device and can be *very* resource intensive, so
@@ -325,12 +334,13 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
-(This is only supported with Redis-based replication.)
-To enable this, the worker must have a HTTP replication listener configured,
-have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams, but unless otherwise documented, each stream can only
-have a single writer.
+To enable this, the worker must have a
+[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+config. The same worker can handle multiple streams, but unless otherwise documented,
+each stream can only have a single writer.
For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -357,9 +367,26 @@ streams and the endpoints associated with them:
##### The `events` stream
-The `events` stream experimentally supports having multiple writers, where work
-is sharded between them by room ID. Note that you *must* restart all worker
-instances when adding or removing event persisters. An example `stream_writers`
+The `events` stream experimentally supports having multiple writer workers, where load
+is sharded between them by room ID. Each writer is called an _event persister_. They are
+responsible for
+- receiving new events,
+- linking them to those already in the room [DAG](development/room-dag-concepts.md),
+- persisting them to the DB, and finally
+- updating the events stream.
+
+Because load is sharded in this way, you *must* restart all worker instances when
+adding or removing event persisters.
+
+An `event_persister` should not be mistaken for an `event_creator`.
+An `event_creator` listens for requests from clients to create new events and does
+so. It will then pass those events over HTTP replication to any configured event
+persisters (or the main process if none are configured).
+
+Note that `event_creator`s and `event_persister`s are implemented using the same
+[`synapse.app.generic_worker`](#synapseappgeneric_worker).
+
+An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:
```yaml
@@ -411,18 +438,20 @@ the stream writer for the `presence` stream:
There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
-stats is enabled).
+stats is enabled). This worker doesn't handle any REST endpoints itself.
-To enable this, the worker must have a `worker_name` and can be configured to run
-background tasks. For example, to move background tasks to a dedicated worker,
-the shared configuration would include:
+To enable this, the worker must have a unique
+[`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and can be configured to run background tasks. For example, to move background tasks
+to a dedicated worker, the shared configuration would include:
```yaml
run_background_tasks_on: background_worker
```
-You might also wish to investigate the `update_user_directory_from_worker` and
-`media_instance_running_background_jobs` settings.
+You might also wish to investigate the
+[`update_user_directory_from_worker`](#updating-the-user-directory) and
+[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.
An example for a dedicated background worker instance:
@@ -458,8 +487,8 @@ worker application type.
#### Notifying Application Services
You can designate one generic worker to send output traffic to Application Services.
-
-Specify its name in the shared configuration as follows:
+This worker doesn't handle any REST endpoints itself, but you should specify its
+name in the shared configuration as follows:
```yaml
notify_appservices_from_worker: worker_name
@@ -475,18 +504,28 @@ worker application type.
### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
+REST endpoints itself, but you should set
+[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.
-To run multiple instances at once the `pusher_instances` option should list all
-pusher instances by their worker name, e.g.:
+To run multiple instances at once the
+[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
+option should list all pusher instances by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:
```yaml
+start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
```
+An example for a pusher instance:
+
+```yaml
+{{#include systemd-with-workers/workers/pusher_worker.yaml}}
+```
+
### `synapse.app.appservice`
@@ -503,20 +542,31 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set `send_federation: False` in the
-shared configuration file to stop the main synapse sending this traffic.
+REST endpoints itself, but you should set
+[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
+in the shared configuration file to stop the main synapse sending this traffic.
If running multiple federation senders then you must list each
-instance in the `federation_sender_instances` option by their `worker_name`.
+instance in the
+[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
+option by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
- federation_sender2
```
+An example for a federation sender instance:
+
+```yaml
+{{#include systemd-with-workers/workers/federation_sender.yaml}}
+```
+
### `synapse.app.media_repository`
Handles the media repository. It can handle all endpoints starting with:
@@ -532,21 +582,19 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$
-You should also set `enable_media_repo: False` in the shared configuration
+You should also set
+[`enable_media_repo: false`](usage/configuration/config_documentation.md#enable_media_repo)
+in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
-In the `media_repository` worker configuration file, configure the http listener to
+In the `media_repository` worker configuration file, configure the
+[HTTP listener](usage/configuration/config_documentation.md#listeners) to
expose the `media` resource. For example:
```yaml
-worker_listeners:
- - type: http
- port: 8085
- resources:
- - names:
- - media
+{{#include systemd-with-workers/workers/media_worker.yaml}}
```
Note that if running multiple media repositories they must be on the same server
@@ -581,52 +629,23 @@ handle it, and are online.
If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.
-### `synapse.app.frontend_proxy`
-
-Proxies some frequently-requested client endpoints to add caching and remove
-load from the main synapse. It can handle REST endpoints matching the following
-regular expressions:
-
- ^/_matrix/client/(r0|v3|unstable)/keys/upload
-
-If `use_presence` is False in the homeserver config, it can also handle REST
-endpoints matching the following regular expressions:
-
- ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status
-
-This "stub" presence handler will pass through `GET` request but make the
-`PUT` effectively a no-op.
-
-It will proxy any requests it cannot handle to the main synapse instance. It
-must therefore be configured with the location of the main instance, via
-the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
-file. For example:
-
-```yaml
-worker_main_http_uri: http://127.0.0.1:8008
-```
-
### Historical apps
-*Note:* Historically there used to be more apps, however they have been
-amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
-are ones that do specific processing unrelated to requests, e.g. the `pusher`
-that handles sending out push notifications for new events. The intention is for
-all these to be folded into the `generic_worker` app and to use config to define
-which processes handle the various proccessing such as push notifications.
+The following used to be separate worker application types, but are now
+equivalent to `synapse.app.generic_worker`:
+ * `synapse.app.client_reader`
+ * `synapse.app.event_creator`
+ * `synapse.app.federation_reader`
+ * `synapse.app.frontend_proxy`
+ * `synapse.app.synchrotron`
-## Migration from old config
-There are two main independent changes that have been made: introducing Redis
-support and merging apps into `synapse.app.generic_worker`. Both these changes
-are backwards compatible and so no changes to the config are required, however
-server admins are encouraged to plan to migrate to Redis as the old style direct
-TCP replication config is deprecated.
+## Migration from old config
-To migrate to Redis add the `redis` config as above, and optionally remove the
-TCP `replication` listener from master and `worker_replication_port` from worker
-config.
+The main change that has occurred is the merging of worker apps into
+`synapse.app.generic_worker`. This change is backwards compatible and so no
+changes to the config are required.
To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where worker are started (e.g.
diff --git a/mypy.ini b/mypy.ini
index 6add272990..0b6e7df267 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,6 +1,6 @@
[mypy]
namespace_packages = True
-plugins = mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
+plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
check_untyped_defs = True
show_error_codes = True
@@ -11,12 +11,14 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
+strict_equality = True
files =
docker/,
scripts-dev/,
synapse/,
- tests/
+ tests/,
+ build_rust.py
# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
@@ -55,14 +57,8 @@ exclude = (?x)
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
- |tests/test_metrics.py
|tests/test_state.py
|tests/test_terms_auth.py
- |tests/util/caches/test_cached_call.py
- |tests/util/caches/test_deferred_cache.py
- |tests/util/caches/test_descriptors.py
- |tests/util/caches/test_response_cache.py
- |tests/util/caches/test_ttlcache.py
|tests/util/test_async_helpers.py
|tests/util/test_batching_queue.py
|tests/util/test_dict_cache.py
@@ -105,15 +101,27 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
+[mypy-tests.metrics.test_background_process_metrics]
+disallow_untyped_defs = True
+
+[mypy-tests.push.test_bulk_push_rule_evaluator]
+disallow_untyped_defs = True
+
[mypy-tests.test_server]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
+[mypy-tests.storage.test_id_generators]
+disallow_untyped_defs = True
+
[mypy-tests.storage.test_profile]
disallow_untyped_defs = True
+[mypy-tests.handlers.test_sso]
+disallow_untyped_defs = True
+
[mypy-tests.storage.test_user_directory]
disallow_untyped_defs = True
@@ -123,9 +131,14 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
-[mypy-tests.utils]
+[mypy-tests.util.caches.*]
disallow_untyped_defs = True
+[mypy-tests.util.caches.test_descriptors]
+disallow_untyped_defs = False
+
+[mypy-tests.utils]
+disallow_untyped_defs = True
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
@@ -181,3 +194,6 @@ ignore_missing_imports = True
[mypy-incremental.*]
ignore_missing_imports = True
+
+[mypy-setuptools_rust.*]
+ignore_missing_imports = True
diff --git a/poetry.lock b/poetry.lock
index b62c24ae16..d9e4803a5f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,34 +1,31 @@
[[package]]
name = "attrs"
-version = "21.4.0"
+version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.5"
[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
-docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
-tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
-tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
+docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
+tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
-name = "authlib"
-version = "0.15.5"
-description = "The ultimate Python library in building OAuth and OpenID Connect servers."
+name = "Authlib"
+version = "1.1.0"
+description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
category = "main"
optional = true
python-versions = "*"
[package.dependencies]
-cryptography = "*"
-
-[package.extras]
-client = ["requests"]
+cryptography = ">=3.2"
[[package]]
name = "automat"
-version = "20.2.0"
+version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
category = "main"
optional = false
@@ -39,38 +36,34 @@ attrs = ">=19.2.0"
six = "*"
[package.extras]
-visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"]
+visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
-version = "3.2.0"
+version = "4.0.1"
description = "Modern password hashing for your software and your servers"
category = "main"
optional = false
python-versions = ">=3.6"
-[package.dependencies]
-cffi = ">=1.1"
-six = ">=1.4.1"
-
[package.extras]
tests = ["pytest (>=3.2.1,!=3.3.0)"]
typecheck = ["mypy"]
[[package]]
name = "black"
-version = "22.3.0"
+version = "22.10.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
-python-versions = ">=3.6.2"
+python-versions = ">=3.7"
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
pathspec = ">=0.9.0"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
@@ -82,27 +75,31 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "bleach"
-version = "4.1.0"
+version = "5.0.1"
description = "An easy safelist-based HTML-sanitizing tool."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-packaging = "*"
six = ">=1.9.0"
webencodings = "*"
+[package.extras]
+css = ["tinycss2 (>=1.1.0,<1.2)"]
+dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"]
+
[[package]]
name = "canonicaljson"
-version = "1.6.0"
+version = "1.6.4"
description = "Canonical JSON"
category = "main"
optional = false
-python-versions = "~=3.7"
+python-versions = ">=3.7"
[package.dependencies]
simplejson = ">=3.14.0"
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.8\""}
[package.extras]
frozendict = ["frozendict (>=1.0)"]
@@ -135,11 +132,11 @@ optional = false
python-versions = ">=3.5.0"
[package.extras]
-unicode_backport = ["unicodedata2"]
+unicode-backport = ["unicodedata2"]
[[package]]
name = "click"
-version = "8.1.1"
+version = "8.1.3"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
@@ -189,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "36.0.1"
+version = "38.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@@ -200,11 +197,11 @@ cffi = ">=1.12"
[package.extras]
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
-docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"]
+docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
-sdist = ["setuptools_rust (>=0.11.4)"]
+sdist = ["setuptools-rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"]
+test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
[[package]]
name = "defusedxml"
@@ -226,7 +223,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["tox", "bump2version (<1)", "sphinx (<2)", "importlib-metadata (<3)", "importlib-resources (<4)", "configparser (<5)", "sphinxcontrib-websupport (<2)", "zipp (<2)", "PyTest (<5)", "PyTest-Cov (<2.6)", "pytest", "pytest-cov"]
+dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"]
[[package]]
name = "docutils"
@@ -245,40 +242,40 @@ optional = true
python-versions = ">=3.7"
[package.extras]
-dev = ["tox", "coverage", "lxml", "xmlschema (>=1.8.0)", "sphinx", "memory-profiler", "flake8", "mypy (==0.910)"]
+dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"]
[[package]]
name = "flake8"
-version = "4.0.1"
+version = "5.0.4"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.6.1"
[package.dependencies]
-importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
-mccabe = ">=0.6.0,<0.7.0"
-pycodestyle = ">=2.8.0,<2.9.0"
-pyflakes = ">=2.4.0,<2.5.0"
+importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""}
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.9.0,<2.10.0"
+pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "21.3.2"
+version = "22.10.27"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
attrs = ">=19.2.0"
flake8 = ">=3.0.0"
[package.extras]
-dev = ["coverage", "black", "hypothesis", "hypothesmith"]
+dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"]
[[package]]
name = "flake8-comprehensions"
-version = "3.8.0"
+version = "3.10.1"
description = "A flake8 plugin to help you write better list/set/dict comprehensions."
category = "dev"
optional = false
@@ -290,7 +287,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[[package]]
name = "frozendict"
-version = "2.3.3"
+version = "2.3.4"
description = "A simple immutable dictionary"
category = "main"
optional = false
@@ -309,7 +306,7 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.27"
+version = "3.1.29"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -340,7 +337,7 @@ idna = ">=2.5"
[[package]]
name = "idna"
-version = "3.3"
+version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false
@@ -367,8 +364,8 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "importlib-resources"
@@ -382,8 +379,8 @@ python-versions = ">=3.6"
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "incremental"
@@ -398,16 +395,17 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
[[package]]
name = "isort"
-version = "5.7.0"
+version = "5.10.1"
description = "A Python utility / library to sort Python imports."
category = "dev"
optional = false
-python-versions = ">=3.6,<4.0"
+python-versions = ">=3.6.1,<4.0"
[package.extras]
-pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
-requirements_deprecated_finder = ["pipreqs", "pip-api"]
colors = ["colorama (>=0.4.3,<0.5.0)"]
+pipfile-deprecated-finder = ["pipreqs", "requirementslib"]
+plugins = ["setuptools"]
+requirements-deprecated-finder = ["pip-api", "pipreqs"]
[[package]]
name = "jaeger-client"
@@ -424,7 +422,7 @@ thrift = "*"
tornado = ">=4.3"
[package.extras]
-tests = ["mock", "pycurl", "pytest", "pytest-cov", "coverage", "pytest-timeout", "pytest-tornado", "pytest-benchmark", "pytest-localserver", "flake8", "flake8-quotes", "flake8-typing-imports", "codecov", "tchannel (==2.1.0)", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "mypy"]
+tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"]
[[package]]
name = "jeepney"
@@ -435,16 +433,16 @@ optional = false
python-versions = ">=3.6"
[package.extras]
-test = ["pytest", "pytest-trio", "pytest-asyncio", "testpath", "trio", "async-timeout"]
-trio = ["trio", "async-generator"]
+test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"]
+trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
-version = "3.0.3"
+version = "3.1.2"
description = "A very fast and expressive template engine."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
MarkupSafe = ">=2.0"
@@ -454,7 +452,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.4.0"
+version = "4.17.0"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -464,12 +462,13 @@ python-versions = ">=3.7"
attrs = ">=17.4.0"
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format_nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
name = "keyring"
@@ -486,8 +485,8 @@ pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_
SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "ldap3"
@@ -511,7 +510,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html5 = ["html5lib"]
-htmlsoup = ["beautifulsoup4"]
+htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=0.29.7)"]
[[package]]
@@ -524,23 +523,23 @@ python-versions = ">=3.7"
[[package]]
name = "matrix-common"
-version = "1.2.1"
+version = "1.3.0"
description = "Common utilities for Synapse, Sydent and Sygnal"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
attrs = "*"
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
[package.extras]
-dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"]
-test = ["tox", "twisted", "aiounittest"]
+dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
+test = ["aiounittest", "tox", "twisted"]
[[package]]
name = "matrix-synapse-ldap3"
-version = "0.2.1"
+version = "0.2.2"
description = "An LDAP3 auth provider for Synapse"
category = "main"
optional = true
@@ -552,31 +551,31 @@ service-identity = "*"
Twisted = ">=15.1.0"
[package.extras]
-dev = ["matrix-synapse", "tox", "ldaptor", "mypy (==0.910)", "types-setuptools", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
+dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"]
[[package]]
name = "mccabe"
-version = "0.6.1"
+version = "0.7.0"
description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "msgpack"
-version = "1.0.3"
-description = "MessagePack (de)serializer."
+version = "1.0.4"
+description = "MessagePack serializer"
category = "main"
optional = false
python-versions = "*"
[[package]]
name = "mypy"
-version = "0.950"
+version = "0.981"
description = "Optional static typing for Python"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
mypy-extensions = ">=0.4.3"
@@ -599,19 +598,19 @@ python-versions = "*"
[[package]]
name = "mypy-zope"
-version = "0.3.7"
+version = "0.3.11"
description = "Plugin for mypy to support zope interfaces"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
-mypy = "0.950"
+mypy = "0.981"
"zope.interface" = "*"
"zope.schema" = "*"
[package.extras]
-test = ["pytest (>=4.6)", "pytest-cov", "lxml"]
+test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
[[package]]
name = "netaddr"
@@ -630,7 +629,7 @@ optional = true
python-versions = "*"
[package.extras]
-tests = ["doubles", "flake8", "flake8-quotes", "mock", "pytest", "pytest-cov", "pytest-mock", "sphinx", "sphinx-rtd-theme", "six (>=1.10.0,<2.0)", "gevent", "tornado"]
+tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"]
[[package]]
name = "packaging"
@@ -664,7 +663,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "phonenumbers"
-version = "8.12.44"
+version = "8.13.0"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@@ -672,12 +671,16 @@ python-versions = "*"
[[package]]
name = "pillow"
-version = "9.0.1"
+version = "9.3.0"
description = "Python Imaging Library (Fork)"
category = "main"
optional = false
python-versions = ">=3.7"
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "pkginfo"
version = "1.8.2"
@@ -690,6 +693,14 @@ python-versions = "*"
testing = ["coverage", "nose"]
[[package]]
+name = "pkgutil_resolve_name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
name = "platformdirs"
version = "2.5.1"
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
@@ -703,7 +714,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
[[package]]
name = "prometheus-client"
-version = "0.14.0"
+version = "0.15.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false
@@ -714,7 +725,7 @@ twisted = ["twisted"]
[[package]]
name = "psycopg2"
-version = "2.9.3"
+version = "2.9.5"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
category = "main"
optional = true
@@ -764,11 +775,11 @@ pyasn1 = ">=0.4.6,<0.5.0"
[[package]]
name = "pycodestyle"
-version = "2.8.0"
+version = "2.9.1"
description = "Python style guide checker"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.6"
[[package]]
name = "pycparser"
@@ -779,24 +790,39 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
+name = "pydantic"
+version = "1.10.2"
+description = "Data validation and settings management using python type hints"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+typing-extensions = ">=4.1.0"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
name = "pyflakes"
-version = "2.4.0"
+version = "2.5.0"
description = "passive checker of Python programs"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+python-versions = ">=3.6"
[[package]]
name = "pygithub"
-version = "1.55"
+version = "1.57"
description = "Use the full Github API v3"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
deprecated = "*"
-pyjwt = ">=2.0"
+pyjwt = ">=2.4.0"
pynacl = ">=1.4.0"
requests = ">=2.14.0"
@@ -821,9 +847,9 @@ python-versions = ">=3.6"
[package.extras]
crypto = ["cryptography (>=3.3.1)"]
-dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"]
+dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
-tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"]
+tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pymacaroons"
@@ -857,8 +883,8 @@ python-versions = ">=3.6"
cffi = ">=1.4.1"
[package.extras]
-docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
-tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"]
+docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"]
+tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
@@ -896,25 +922,26 @@ python-versions = ">=3.7"
[[package]]
name = "pysaml2"
-version = "7.1.2"
+version = "7.2.1"
description = "Python implementation of SAML Version 2 Standard"
category = "main"
optional = true
python-versions = "<4,>=3.6"
[package.dependencies]
-cryptography = ">=1.4"
+cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyOpenSSL = "*"
python-dateutil = "*"
pytz = "*"
requests = ">=1.0.0"
+setuptools = "*"
six = "*"
xmlschema = ">=1.2.1"
[package.extras]
-s2repoze = ["paste", "zope.interface", "repoze.who"]
+s2repoze = ["paste", "repoze.who", "zope.interface"]
[[package]]
name = "python-dateutil"
@@ -953,11 +980,11 @@ python-versions = ">=3.6"
[[package]]
name = "readme-renderer"
-version = "33.0"
+version = "37.2"
description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
bleach = ">=2.1.0"
@@ -983,7 +1010,7 @@ urllib3 = ">=1.21.1,<1.27"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
-use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"]
[[package]]
name = "requests-toolbelt"
@@ -1008,6 +1035,22 @@ python-versions = ">=3.7"
idna2008 = ["idna"]
[[package]]
+name = "rich"
+version = "12.6.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+category = "dev"
+optional = false
+python-versions = ">=3.6.3,<4.0.0"
+
+[package.dependencies]
+commonmark = ">=0.9.0,<0.10.0"
+pygments = ">=2.6.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+
+[[package]]
name = "secretstorage"
version = "3.3.1"
description = "Python bindings to FreeDesktop.org Secret Service API"
@@ -1020,8 +1063,20 @@ cryptography = ">=2.0"
jeepney = ">=0.6"
[[package]]
+name = "semantic-version"
+version = "2.10.0"
+description = "A library implementing the 'SemVer' scheme."
+category = "main"
+optional = false
+python-versions = ">=2.7"
+
+[package.extras]
+dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
+doc = ["Sphinx", "sphinx-rtd-theme"]
+
+[[package]]
name = "sentry-sdk"
-version = "1.5.11"
+version = "1.11.0"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1029,7 +1084,7 @@ python-versions = "*"
[package.dependencies]
certifi = "*"
-urllib3 = ">=1.10.0"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
@@ -1039,14 +1094,17 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
-flask = ["flask (>=0.11)", "blinker (>=1.1)"]
+fastapi = ["fastapi (>=0.79.0)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
-pure_eval = ["pure-eval", "executing", "asttokens"]
+pure-eval = ["asttokens", "executing", "pure-eval"]
+pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
-quart = ["quart (>=0.16.1)", "blinker (>=1.1)"]
+quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
tornado = ["tornado (>=5)"]
[[package]]
@@ -1065,12 +1123,38 @@ pyasn1-modules = "*"
six = "*"
[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "pytest", "sphinx", "furo", "idna", "pyopenssl"]
-docs = ["sphinx", "furo"]
+dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"]
+docs = ["furo", "sphinx"]
idna = ["idna"]
tests = ["coverage[toml] (>=5.0.2)", "pytest"]
[[package]]
+name = "setuptools"
+version = "65.3.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "setuptools-rust"
+version = "1.5.2"
+description = "Setuptools Rust extension plugin"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+semantic-version = ">=2.8.2,<3"
+setuptools = ">=62.4"
+typing-extensions = ">=3.7.4.3"
+
+[[package]]
name = "signedjson"
version = "1.1.4"
description = "Sign JSON with Ed25519 signatures"
@@ -1173,39 +1257,24 @@ python-versions = ">= 3.5"
[[package]]
name = "towncrier"
-version = "21.9.0"
+version = "22.8.0"
description = "Building newsfiles for your project."
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
[package.dependencies]
click = "*"
click-default-group = "*"
incremental = "*"
jinja2 = "*"
-tomli = {version = "*", markers = "python_version >= \"3.6\""}
+setuptools = "*"
+tomli = "*"
[package.extras]
dev = ["packaging"]
[[package]]
-name = "tqdm"
-version = "4.63.0"
-description = "Fast, Extensible Progress Meter"
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[package.extras]
-dev = ["py-make (>=0.1.0)", "twine", "wheel"]
-notebook = ["ipywidgets (>=6)"]
-telegram = ["requests"]
-
-[[package]]
name = "treq"
version = "22.2.0"
description = "High-level Twisted HTTP Client API"
@@ -1221,36 +1290,35 @@ requests = ">=2.1.0"
Twisted = {version = ">=18.7.0", extras = ["tls"]}
[package.extras]
-dev = ["pep8", "pyflakes", "httpbin (==0.5.0)"]
+dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"]
docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "3.8.0"
+version = "4.0.1"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-colorama = ">=0.4.3"
importlib-metadata = ">=3.6"
keyring = ">=15.1"
pkginfo = ">=1.8.1"
-readme-renderer = ">=21.0"
+readme-renderer = ">=35.0"
requests = ">=2.20"
requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
rfc3986 = ">=1.4.0"
-tqdm = ">=4.14"
+rich = ">=12.0.0"
urllib3 = ">=1.26.0"
[[package]]
name = "twisted"
-version = "22.4.0"
+version = "22.10.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false
-python-versions = ">=3.6.7"
+python-versions = ">=3.7.1"
[package.dependencies]
attrs = ">=19.2.0"
@@ -1259,27 +1327,28 @@ constantly = ">=15.1"
hyperlink = ">=17.1.1"
idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}
incremental = ">=21.3.0"
-pyopenssl = {version = ">=16.0.0", optional = true, markers = "extra == \"tls\""}
+pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""}
service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""}
twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""}
typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
-all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
-conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
-conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
+all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
+conch-nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
-dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"]
+dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"]
+gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
-mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
-osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
-tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
-windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"]
+tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
+windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
[[package]]
name = "twisted-iocpsupport"
@@ -1311,7 +1380,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
-version = "4.1.4"
+version = "5.0.3"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1355,7 +1424,7 @@ python-versions = "*"
[[package]]
name = "types-jsonschema"
-version = "4.4.6"
+version = "4.17.0.1"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
@@ -1363,7 +1432,7 @@ python-versions = "*"
[[package]]
name = "types-opentracing"
-version = "2.4.7"
+version = "2.4.10"
description = "Typing stubs for opentracing"
category = "dev"
optional = false
@@ -1371,7 +1440,7 @@ python-versions = "*"
[[package]]
name = "types-pillow"
-version = "9.0.15"
+version = "9.3.0.1"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1379,7 +1448,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
-version = "2.9.9"
+version = "2.9.21.1"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@@ -1387,7 +1456,7 @@ python-versions = "*"
[[package]]
name = "types-pyopenssl"
-version = "22.0.0"
+version = "22.1.0.2"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
@@ -1398,7 +1467,7 @@ types-cryptography = "*"
[[package]]
name = "types-pyyaml"
-version = "6.0.4"
+version = "6.0.12.2"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
@@ -1406,7 +1475,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.27.11"
+version = "2.28.11.2"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -1417,7 +1486,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "57.4.9"
+version = "65.5.0.3"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1433,11 +1502,11 @@ python-versions = "*"
[[package]]
name = "typing-extensions"
-version = "4.1.1"
-description = "Backported and Experimental Type Hints for Python 3.6+"
+version = "4.4.0"
+description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "unpaddedbase64"
@@ -1449,15 +1518,15 @@ python-versions = ">=3.6,<4.0"
[[package]]
name = "urllib3"
-version = "1.26.8"
+version = "1.26.12"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
[package.extras]
-brotli = ["brotlipy (>=0.6.0)"]
-secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
@@ -1470,7 +1539,7 @@ python-versions = "*"
[[package]]
name = "wrapt"
-version = "1.13.3"
+version = "1.14.1"
description = "Module for decorators, wrappers and monkey patching."
category = "dev"
optional = false
@@ -1489,8 +1558,8 @@ elementpath = ">=2.5.0,<3.0.0"
[package.extras]
codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"]
-dev = ["tox", "coverage", "lxml", "elementpath (>=2.5.0,<3.0.0)", "memory-profiler", "sphinx", "sphinx-rtd-theme", "jinja2", "flake8", "mypy", "lxml-stubs"]
-docs = ["elementpath (>=2.5.0,<3.0.0)", "sphinx", "sphinx-rtd-theme", "jinja2"]
+dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
+docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"]
[[package]]
name = "zipp"
@@ -1501,8 +1570,8 @@ optional = false
python-versions = ">=3.7"
[package.extras]
-docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "zope.event"
@@ -1512,8 +1581,11 @@ category = "dev"
optional = false
python-versions = "*"
+[package.dependencies]
+setuptools = "*"
+
[package.extras]
-docs = ["sphinx"]
+docs = ["Sphinx"]
test = ["zope.testrunner"]
[[package]]
@@ -1524,8 +1596,11 @@ category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+[package.dependencies]
+setuptools = "*"
+
[package.extras]
-docs = ["sphinx", "repoze.sphinx.autointerface"]
+docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
@@ -1538,16 +1613,17 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[package.dependencies]
+setuptools = "*"
"zope.event" = "*"
"zope.interface" = ">=5.0.0"
[package.extras]
-docs = ["sphinx", "repoze.sphinx.autointerface"]
+docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"]
-cache_memory = ["Pympler"]
+cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
@@ -1558,70 +1634,79 @@ saml2 = ["pysaml2"]
sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
test = ["parameterized", "idna"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78"
+content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0"
[metadata.files]
attrs = [
- {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
- {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
+ {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
+ {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
-authlib = [
- {file = "Authlib-0.15.5-py2.py3-none-any.whl", hash = "sha256:ecf4a7a9f2508c0bb07e93a752dd3c495cfaffc20e864ef0ffc95e3f40d2abaf"},
- {file = "Authlib-0.15.5.tar.gz", hash = "sha256:b83cf6360c8e92b0e9df0d1f32d675790bcc4e3c03977499b1eed24dcdef4252"},
+Authlib = [
+ {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
+ {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"},
]
automat = [
- {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"},
- {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"},
+ {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
+ {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
]
bcrypt = [
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd"},
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a0584a92329210fcd75eb8a3250c5a941633f8bfaf2a18f81009b097732839b7"},
- {file = "bcrypt-3.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:56e5da069a76470679f312a7d3d23deb3ac4519991a0361abc11da837087b61d"},
- {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"},
- {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"},
- {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"},
+ {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"},
+ {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"},
]
black = [
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"},
- {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"},
- {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"},
- {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"},
- {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"},
- {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"},
- {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"},
- {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"},
- {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"},
- {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"},
- {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"},
- {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"},
- {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"},
- {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"},
- {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"},
- {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"},
- {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"},
- {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"},
+ {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"},
+ {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"},
+ {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"},
+ {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"},
+ {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"},
+ {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"},
+ {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"},
+ {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"},
+ {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"},
+ {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"},
+ {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"},
+ {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"},
+ {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"},
+ {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"},
+ {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"},
+ {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"},
+ {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"},
+ {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"},
+ {file = "black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"},
+ {file = "black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"},
+ {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"},
]
bleach = [
- {file = "bleach-4.1.0-py2.py3-none-any.whl", hash = "sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994"},
- {file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
+ {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"},
+ {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
]
canonicaljson = [
- {file = "canonicaljson-1.6.0-py3-none-any.whl", hash = "sha256:7230c2a2a3db07874f622af84effe41a655e07bf23734830e18a454e65d5b998"},
- {file = "canonicaljson-1.6.0.tar.gz", hash = "sha256:8739d5fd91aca7281d425660ae65af7663808c8177778965f67e90b16a2b2427"},
+ {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
+ {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@@ -1684,8 +1769,8 @@ charset-normalizer = [
{file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"},
]
click = [
- {file = "click-8.1.1-py3-none-any.whl", hash = "sha256:5e0d195c2067da3136efb897449ec1e9e6c98282fbf30d7f9e164af9be901a6b"},
- {file = "click-8.1.1.tar.gz", hash = "sha256:7ab900e38149c9872376e8f9b5986ddcaf68c0f413cf73678a0bca5547e6f976"},
+ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
+ {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
]
click-default-group = [
{file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"},
@@ -1703,26 +1788,32 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
cryptography = [
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"},
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"},
- {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"},
- {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"},
- {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"},
+ {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"},
+ {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"},
+ {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"},
+ {file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"},
+ {file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"},
+ {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"},
+ {file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"},
+ {file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"},
+ {file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"},
]
defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
@@ -1741,43 +1832,43 @@ elementpath = [
{file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
]
flake8 = [
- {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
- {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
+ {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"},
+ {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
- {file = "flake8-bugbear-21.3.2.tar.gz", hash = "sha256:cadce434ceef96463b45a7c3000f23527c04ea4b531d16c7ac8886051f516ca0"},
- {file = "flake8_bugbear-21.3.2-py36.py37.py38-none-any.whl", hash = "sha256:5d6ccb0c0676c738a6e066b4d50589c408dcc1c5bf1d73b464b18b73cd6c05c2"},
+ {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"},
+ {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"},
]
flake8-comprehensions = [
- {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
- {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
+ {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
+ {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"},
]
frozendict = [
- {file = "frozendict-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39942914c1217a5a49c7551495a103b3dbd216e19413687e003b859c6b0ebc12"},
- {file = "frozendict-2.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5589256058b31f2b91419fa30b8dc62dbdefe7710e688a3fd5b43849161eecc9"},
- {file = "frozendict-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:35eb7e59e287c41f4f712d4d3d2333354175b155d217b97c99c201d2d8920790"},
- {file = "frozendict-2.3.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:310aaf81793abf4f471895e6fe65e0e74a28a2aaf7b25c2ba6ccd4e35af06842"},
- {file = "frozendict-2.3.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c353c11010a986566a0cb37f9a783c560ffff7d67d5e7fd52221fb03757cdc43"},
- {file = "frozendict-2.3.3-cp36-cp36m-win_amd64.whl", hash = "sha256:15b5f82aad108125336593cec1b6420c638bf45f449c57e50949fc7654ea5a41"},
- {file = "frozendict-2.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4737e5257756bd6b877504ff50185b705db577b5330d53040a6cf6417bb3cdb"},
- {file = "frozendict-2.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a14c11e33e8b0bc09e07bba3732c77a502c39edb8c3959fd9a0e490e031158"},
- {file = "frozendict-2.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:027952d1698ac9c766ef43711226b178cdd49d2acbdff396936639ad1d2a5615"},
- {file = "frozendict-2.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef818d66c85098a37cf42509545a4ba7dd0c4c679d6262123a8dc14cc474bab7"},
- {file = "frozendict-2.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812279f2b270c980112dc4e367b168054f937108f8044eced4199e0ab2945a37"},
- {file = "frozendict-2.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c1fb7efbfebc2075f781be3d9774e4ba6ce4fc399148b02097f68d4b3c4bc00a"},
- {file = "frozendict-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0b46d4bf95bce843c0151959d54c3e5b8d0ce29cb44794e820b3ec980d63eee"},
- {file = "frozendict-2.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38c4660f37fcc70a32ff997fe58e40b3fcc60b2017b286e33828efaa16b01308"},
- {file = "frozendict-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:919e3609844fece11ab18bcbf28a3ed20f8108ad4149d7927d413687f281c6c9"},
- {file = "frozendict-2.3.3-py3-none-any.whl", hash = "sha256:f988b482d08972a196664718167a993a61c9e9f6fe7b0ca2443570b5f20ca44a"},
- {file = "frozendict-2.3.3.tar.gz", hash = "sha256:398539c52af3c647d103185bbaa1291679f0507ad035fe3bab2a8b0366d52cf1"},
+ {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
+ {file = "frozendict-2.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c9887179a245a66a50f52afa08d4d92ae0f269839fab82285c70a0fa0dd782"},
+ {file = "frozendict-2.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:b98a0d65a59af6da03f794f90b0c3085a7ee14e7bf8f0ef36b079ee8aa992439"},
+ {file = "frozendict-2.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d8042b7dab5e992e30889c9b71b781d5feef19b372d47d735e4d7d45846fd4a"},
+ {file = "frozendict-2.3.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90"},
+ {file = "frozendict-2.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:dbbe1339ac2646523e0bb00d1896085d1f70de23780e4927ca82b36ab8a044d3"},
+ {file = "frozendict-2.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95bac22f7f09d81f378f2b3f672b7a50a974ca180feae1507f5e21bc147e8bc8"},
+ {file = "frozendict-2.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae686722c144b333c4dbdc16323a5de11406d26b76d2be1cc175f90afacb5ba"},
+ {file = "frozendict-2.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:389f395a74eb16992217ac1521e689c1dea2d70113bcb18714669ace1ed623b9"},
+ {file = "frozendict-2.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ccb6450a416c9cc9acef7683e637e28356e3ceeabf83521f74cc2718883076b7"},
+ {file = "frozendict-2.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca59108b77cadc13ba7dfea7e8f50811208c7652a13dc6c7f92d7782a24d299"},
+ {file = "frozendict-2.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:3ec86ebf143dd685184215c27ec416c36e0ba1b80d81b1b9482f7d380c049b4e"},
+ {file = "frozendict-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5809e6ff6b7257043a486f7a3b73a7da71cf69a38980b4171e4741291d0d9eb3"},
+ {file = "frozendict-2.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c550ed7fdf1962984bec21630c584d722b3ee5d5f57a0ae2527a0121dc0414a"},
+ {file = "frozendict-2.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:3e93aebc6e69a8ef329bbe9afb8342bd33c7b5c7a0c480cb9f7e60b0cbe48072"},
+ {file = "frozendict-2.3.4-py3-none-any.whl", hash = "sha256:d722f3d89db6ae35ef35ecc243c40c800eb344848c83dba4798353312cd37b15"},
+ {file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"},
]
gitdb = [
{file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
{file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
]
gitpython = [
- {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"},
- {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"},
+ {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"},
+ {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"},
]
hiredis = [
{file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"},
@@ -1827,8 +1918,8 @@ hyperlink = [
{file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"},
]
idna = [
- {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
- {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]
ijson = [
{file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"},
@@ -1907,8 +1998,8 @@ incremental = [
{file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"},
]
isort = [
- {file = "isort-5.7.0-py3-none-any.whl", hash = "sha256:fff4f0c04e1825522ce6949973e83110a6e907750cd92d128b0d14aaaadbffdc"},
- {file = "isort-5.7.0.tar.gz", hash = "sha256:c729845434366216d320e936b8ad6f9d681aab72dc7cbc2d51bedc3582f3ad1e"},
+ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"},
+ {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"},
]
jaeger-client = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
@@ -1918,22 +2009,19 @@ jeepney = [
{file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"},
]
jinja2 = [
- {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
- {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
- {file = "jsonschema-4.4.0-py3-none-any.whl", hash = "sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823"},
- {file = "jsonschema-4.4.0.tar.gz", hash = "sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83"},
+ {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"},
+ {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"},
]
keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
{file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"},
]
ldap3 = [
- {file = "ldap3-2.9.1-py2.6.egg", hash = "sha256:5ab7febc00689181375de40c396dcad4f2659cd260fc5e94c508b6d77c17e9d5"},
- {file = "ldap3-2.9.1-py2.7.egg", hash = "sha256:2bc966556fc4d4fa9f445a1c31dc484ee81d44a51ab0e2d0fd05b62cac75daa6"},
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
- {file = "ldap3-2.9.1-py3.9.egg", hash = "sha256:5630d1383e09ba94839e253e013f1aa1a2cf7a547628ba1265cb7b9a844b5687"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
]
lxml = [
@@ -2051,85 +2139,104 @@ markupsafe = [
{file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
]
matrix-common = [
- {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"},
- {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
+ {file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"},
+ {file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"},
]
matrix-synapse-ldap3 = [
- {file = "matrix-synapse-ldap3-0.2.1.tar.gz", hash = "sha256:bfb4390f4a262ffb0d6f057ff3aeb1e46d4e52ff420a064d795fb4f555f00285"},
- {file = "matrix_synapse_ldap3-0.2.1-py3-none-any.whl", hash = "sha256:1b3310a60f1d06466f35905a269b6df95747fd1305f2b7fe638f373963b2aa2c"},
+ {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"},
+ {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
]
mccabe = [
- {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
- {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
msgpack = [
- {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:96acc674bb9c9be63fa8b6dabc3248fdc575c4adc005c440ad02f87ca7edd079"},
- {file = "msgpack-1.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c3ca57c96c8e69c1a0d2926a6acf2d9a522b41dc4253a8945c4c6cd4981a4e3"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0a792c091bac433dfe0a70ac17fc2087d4595ab835b47b89defc8bbabcf5c73"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c58cdec1cb5fcea8c2f1771d7b5fec79307d056874f746690bd2bdd609ab147"},
- {file = "msgpack-1.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f97c0f35b3b096a330bb4a1a9247d0bd7e1f3a2eba7ab69795501504b1c2c39"},
- {file = "msgpack-1.0.3-cp310-cp310-win32.whl", hash = "sha256:36a64a10b16c2ab31dcd5f32d9787ed41fe68ab23dd66957ca2826c7f10d0b85"},
- {file = "msgpack-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c1ba333b4024c17c7591f0f372e2daa3c31db495a9b2af3cf664aef3c14354f7"},
- {file = "msgpack-1.0.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c2140cf7a3ec475ef0938edb6eb363fa704159e0bf71dde15d953bacc1cf9d7d"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f4c22717c74d44bcd7af353024ce71c6b55346dad5e2cc1ddc17ce8c4507c6b"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d733a15ade190540c703de209ffbc42a3367600421b62ac0c09fde594da6ec"},
- {file = "msgpack-1.0.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7e03b06f2982aa98d4ddd082a210c3db200471da523f9ac197f2828e80e7770"},
- {file = "msgpack-1.0.3-cp36-cp36m-win32.whl", hash = "sha256:3d875631ecab42f65f9dce6f55ce6d736696ced240f2634633188de2f5f21af9"},
- {file = "msgpack-1.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:40fb89b4625d12d6027a19f4df18a4de5c64f6f3314325049f219683e07e678a"},
- {file = "msgpack-1.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6eef0cf8db3857b2b556213d97dd82de76e28a6524853a9beb3264983391dc1a"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d8c332f53ffff01953ad25131272506500b14750c1d0ce8614b17d098252fbc"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c0903bd93cbd34653dd63bbfcb99d7539c372795201f39d16fdfde4418de43a"},
- {file = "msgpack-1.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf1e6bfed4860d72106f4e0a1ab519546982b45689937b40257cfd820650b920"},
- {file = "msgpack-1.0.3-cp37-cp37m-win32.whl", hash = "sha256:d02cea2252abc3756b2ac31f781f7a98e89ff9759b2e7450a1c7a0d13302ff50"},
- {file = "msgpack-1.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f30dd0dc4dfe6231ad253b6f9f7128ac3202ae49edd3f10d311adc358772dba"},
- {file = "msgpack-1.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f201d34dc89342fabb2a10ed7c9a9aaaed9b7af0f16a5923f1ae562b31258dea"},
- {file = "msgpack-1.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bb87f23ae7d14b7b3c21009c4b1705ec107cb21ee71975992f6aca571fb4a42a"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a3a5c4b16e9d0edb823fe54b59b5660cc8d4782d7bf2c214cb4b91a1940a8ef"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74da1e5fcf20ade12c6bf1baa17a2dc3604958922de8dc83cbe3eff22e8b611"},
- {file = "msgpack-1.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73a80bd6eb6bcb338c1ec0da273f87420829c266379c8c82fa14c23fb586cfa1"},
- {file = "msgpack-1.0.3-cp38-cp38-win32.whl", hash = "sha256:9fce00156e79af37bb6db4e7587b30d11e7ac6a02cb5bac387f023808cd7d7f4"},
- {file = "msgpack-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:9b6f2d714c506e79cbead331de9aae6837c8dd36190d02da74cb409b36162e8a"},
- {file = "msgpack-1.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:89908aea5f46ee1474cc37fbc146677f8529ac99201bc2faf4ef8edc023c2bf3"},
- {file = "msgpack-1.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:973ad69fd7e31159eae8f580f3f707b718b61141838321c6fa4d891c4a2cca52"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da24375ab4c50e5b7486c115a3198d207954fe10aaa5708f7b65105df09109b2"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a598d0685e4ae07a0672b59792d2cc767d09d7a7f39fd9bd37ff84e060b1a996"},
- {file = "msgpack-1.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4c309a68cb5d6bbd0c50d5c71a25ae81f268c2dc675c6f4ea8ab2feec2ac4e2"},
- {file = "msgpack-1.0.3-cp39-cp39-win32.whl", hash = "sha256:494471d65b25a8751d19c83f1a482fd411d7ca7a3b9e17d25980a74075ba0e88"},
- {file = "msgpack-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:f01b26c2290cbd74316990ba84a14ac3d599af9cebefc543d241a66e785cf17d"},
- {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"},
+ {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"},
+ {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"},
+ {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"},
+ {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"},
+ {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"},
+ {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"},
+ {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"},
+ {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"},
+ {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"},
+ {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"},
+ {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"},
+ {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"},
+ {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"},
+ {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"},
+ {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"},
+ {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"},
+ {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"},
+ {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"},
+ {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"},
+ {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"},
+ {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"},
+ {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"},
+ {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"},
+ {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"},
+ {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"},
+ {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"},
]
mypy = [
- {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
- {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
- {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
- {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
- {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
- {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
- {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
- {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
- {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
- {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
- {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
- {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
- {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
- {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
- {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
- {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
- {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
- {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
- {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
- {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
- {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
- {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
- {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
+ {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
+ {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
+ {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
+ {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
+ {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
+ {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
+ {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
+ {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
+ {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
+ {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
+ {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
+ {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
+ {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
+ {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
+ {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
+ {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
+ {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
+ {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
+ {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
+ {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
+ {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
+ {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
+ {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
+ {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
]
mypy-extensions = [
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
mypy-zope = [
- {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
- {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
+ {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
+ {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
]
netaddr = [
{file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
@@ -2151,70 +2258,102 @@ pathspec = [
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
]
phonenumbers = [
- {file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
- {file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
+ {file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
+ {file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
]
pillow = [
- {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
- {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
- {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
- {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
- {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
- {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
- {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
- {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
- {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
- {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
- {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
- {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
- {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
- {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"},
+ {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"},
+ {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"},
+ {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"},
+ {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"},
+ {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"},
+ {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"},
+ {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"},
+ {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"},
+ {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"},
+ {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"},
+ {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"},
+ {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"},
+ {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"},
+ {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"},
]
pkginfo = [
{file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"},
{file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"},
]
+pkgutil_resolve_name = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
platformdirs = [
{file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"},
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
]
prometheus-client = [
- {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
- {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
+ {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"},
+ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"},
]
psycopg2 = [
- {file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},
- {file = "psycopg2-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:d3ca6421b942f60c008f81a3541e8faf6865a28d5a9b48544b0ee4f40cac7fca"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:9572e08b50aed176ef6d66f15a21d823bb6f6d23152d35e8451d7d2d18fdac56"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:a81e3866f99382dfe8c15a151f1ca5fde5815fde879348fe5a9884a7c092a305"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:cb10d44e6694d763fa1078a26f7f6137d69f555a78ec85dc2ef716c37447e4b2"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4295093a6ae3434d33ec6baab4ca5512a5082cc43c0505293087b8a46d108461"},
- {file = "psycopg2-2.9.3-cp38-cp38-win32.whl", hash = "sha256:34b33e0162cfcaad151f249c2649fd1030010c16f4bbc40a604c1cb77173dcf7"},
- {file = "psycopg2-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:0762c27d018edbcb2d34d51596e4346c983bd27c330218c56c4dc25ef7e819bf"},
- {file = "psycopg2-2.9.3-cp39-cp39-win32.whl", hash = "sha256:8cf3878353cc04b053822896bc4922b194792df9df2f1ad8da01fb3043602126"},
- {file = "psycopg2-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:06f32425949bd5fe8f625c49f17ebb9784e1e4fe928b7cce72edc36fb68e4c0c"},
- {file = "psycopg2-2.9.3.tar.gz", hash = "sha256:8e841d1bf3434da985cc5ef13e6f75c8981ced601fd70cc6bf33351b91562981"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"},
+ {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"},
]
psycopg2cffi = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
@@ -2223,50 +2362,66 @@ psycopg2cffi-compat = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
pyasn1 = [
- {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"},
- {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"},
- {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"},
- {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"},
{file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
- {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"},
- {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"},
- {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"},
- {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"},
- {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"},
- {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"},
- {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"},
{file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
]
pyasn1-modules = [
{file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
- {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"},
- {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"},
- {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"},
- {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"},
{file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
- {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"},
- {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"},
- {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"},
- {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"},
- {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"},
- {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"},
- {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
]
pycodestyle = [
- {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
- {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
+ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"},
+ {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"},
]
pycparser = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]
+pydantic = [
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd"},
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c"},
+ {file = "pydantic-1.10.2-cp310-cp310-win_amd64.whl", hash = "sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52"},
+ {file = "pydantic-1.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d"},
+ {file = "pydantic-1.10.2-cp37-cp37m-win_amd64.whl", hash = "sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965"},
+ {file = "pydantic-1.10.2-cp38-cp38-win_amd64.whl", hash = "sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda"},
+ {file = "pydantic-1.10.2-cp39-cp39-win_amd64.whl", hash = "sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6"},
+ {file = "pydantic-1.10.2-py3-none-any.whl", hash = "sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709"},
+ {file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"},
+]
pyflakes = [
- {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
- {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
+ {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"},
+ {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
]
pygithub = [
- {file = "PyGithub-1.55-py3-none-any.whl", hash = "sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b"},
- {file = "PyGithub-1.55.tar.gz", hash = "sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283"},
+ {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
+ {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
]
pygments = [
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
@@ -2328,8 +2483,8 @@ pyrsistent = [
{file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
]
pysaml2 = [
- {file = "pysaml2-7.1.2-py2.py3-none-any.whl", hash = "sha256:d915961aaa4d4d97d952b30fe5d18d64cf053465acf3e38d8090b36c5ff08325"},
- {file = "pysaml2-7.1.2.tar.gz", hash = "sha256:1ec94442306511b93fe7a5710f224e05e0aba948682d506614d1e04f3232f827"},
+ {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
+ {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
]
python-dateutil = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
@@ -2351,6 +2506,13 @@ pyyaml = [
{file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
{file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
{file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
+ {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
+ {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
{file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
{file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
{file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
@@ -2379,8 +2541,8 @@ pyyaml = [
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
readme-renderer = [
- {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"},
- {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"},
+ {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"},
+ {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"},
]
requests = [
{file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
@@ -2394,18 +2556,34 @@ rfc3986 = [
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
]
+rich = [
+ {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"},
+ {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"},
+]
secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
]
+semantic-version = [
+ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
+ {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
+]
sentry-sdk = [
- {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
- {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
+ {file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
+ {file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
{file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
]
+setuptools = [
+ {file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
+ {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
+]
+setuptools-rust = [
+ {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"},
+ {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"},
+]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},
@@ -2543,24 +2721,20 @@ tornado = [
{file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
]
towncrier = [
- {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
- {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
-]
-tqdm = [
- {file = "tqdm-4.63.0-py2.py3-none-any.whl", hash = "sha256:e643e071046f17139dea55b880dc9b33822ce21613b4a4f5ea57f202833dbc29"},
- {file = "tqdm-4.63.0.tar.gz", hash = "sha256:1d9835ede8e394bb8c9dcbffbca02d717217113adc679236873eeaac5bc0b3cd"},
+ {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
+ {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
]
treq = [
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
]
twine = [
- {file = "twine-3.8.0-py3-none-any.whl", hash = "sha256:d0550fca9dc19f3d5e8eadfce0c227294df0a2a951251a4385797c8a6198b7c8"},
- {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
+ {file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"},
+ {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"},
]
twisted = [
- {file = "Twisted-22.4.0-py3-none-any.whl", hash = "sha256:f9f7a91f94932477a9fc3b169d57f54f96c6e74a23d78d9ce54039a7f48928a2"},
- {file = "Twisted-22.4.0.tar.gz", hash = "sha256:a047990f57dfae1e0bd2b7df2526d4f16dcdc843774dc108b78c52f2a5f13680"},
+ {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
+ {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"},
]
twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
@@ -2607,8 +2781,8 @@ typed-ast = [
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
]
types-bleach = [
- {file = "types-bleach-4.1.4.tar.gz", hash = "sha256:2d30c2c4fb6854088ac636471352c9a51bf6c089289800d2a8060820a01cd43a"},
- {file = "types_bleach-4.1.4-py3-none-any.whl", hash = "sha256:edffe173ed6d7b6f3543036a96204a9319c3bf6c3645917b14274e43f000cc9b"},
+ {file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"},
+ {file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"},
]
types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
@@ -2627,109 +2801,122 @@ types-ipaddress = [
{file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
]
types-jsonschema = [
- {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"},
- {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"},
+ {file = "types-jsonschema-4.17.0.1.tar.gz", hash = "sha256:62625d492e4930411a431909ac32301aeab6180500e70ee222f81d43204cfb3c"},
+ {file = "types_jsonschema-4.17.0.1-py3-none-any.whl", hash = "sha256:77badbe3881cbf79ac9561be2be2b1f37ab104b13afd2231840e6dd6e94e63c2"},
]
types-opentracing = [
- {file = "types-opentracing-2.4.7.tar.gz", hash = "sha256:be60e9618355aa892571ace002e6b353702538b1c0dc4fbc1c921219d6658830"},
- {file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"},
+ {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"},
+ {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
]
types-pillow = [
- {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
- {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
+ {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
+ {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
]
types-psycopg2 = [
- {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},
- {file = "types_psycopg2-2.9.9-py3-none-any.whl", hash = "sha256:cec9291d4318ad70b407310f8304b3d40f6d0358f09870448f7a65e3027c80af"},
+ {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
+ {file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
]
types-pyopenssl = [
- {file = "types-pyOpenSSL-22.0.0.tar.gz", hash = "sha256:d86dde7f6fe2f1ac9fe0b6282e489f649f480364bdaa9d6a4696d52505f4477e"},
- {file = "types_pyOpenSSL-22.0.0-py3-none-any.whl", hash = "sha256:da685f57b864979f36df0157895139c8244ad4aad19b551f1678206fbad0108a"},
+ {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
+ {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
]
types-pyyaml = [
- {file = "types-PyYAML-6.0.4.tar.gz", hash = "sha256:6252f62d785e730e454dfa0c9f0fb99d8dae254c5c3c686903cf878ea27c04b7"},
- {file = "types_PyYAML-6.0.4-py3-none-any.whl", hash = "sha256:693b01c713464a6851f36ff41077f8adbc6e355eda929addfb4a97208aea9b4b"},
+ {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"},
+ {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
]
types-requests = [
- {file = "types-requests-2.27.11.tar.gz", hash = "sha256:6a7ed24b21780af4a5b5e24c310b2cd885fb612df5fd95584d03d87e5f2a195a"},
- {file = "types_requests-2.27.11-py3-none-any.whl", hash = "sha256:506279bad570c7b4b19ac1f22e50146538befbe0c133b2cea66a9b04a533a859"},
+ {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"},
+ {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"},
]
types-setuptools = [
- {file = "types-setuptools-57.4.9.tar.gz", hash = "sha256:536ef74744f8e1e4be4fc719887f886e74e4cf3c792b4a06984320be4df450b5"},
- {file = "types_setuptools-57.4.9-py3-none-any.whl", hash = "sha256:948dc6863373750e2cd0b223a84f1fb608414cde5e55cf38ea657b93aeb411d2"},
+ {file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"},
+ {file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"},
]
types-urllib3 = [
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
{file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"},
]
typing-extensions = [
- {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"},
- {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"},
+ {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"},
+ {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"},
]
unpaddedbase64 = [
{file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"},
{file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"},
]
urllib3 = [
- {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"},
- {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"},
+ {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"},
+ {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"},
]
webencodings = [
{file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
wrapt = [
- {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"},
- {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"},
- {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"},
- {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"},
- {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"},
- {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"},
- {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"},
- {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"},
- {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"},
- {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"},
- {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"},
- {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"},
- {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"},
- {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"},
- {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"},
- {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"},
- {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"},
- {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"},
- {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"},
+ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
+ {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
+ {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
+ {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
+ {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
+ {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
+ {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
+ {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
+ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
+ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
]
xmlschema = [
{file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"},
diff --git a/pyproject.toml b/pyproject.toml
index af7def0c53..5a265ba5a5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,9 +52,12 @@ include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true
+[tool.maturin]
+manifest-path = "rust/Cargo.toml"
+
[tool.poetry]
name = "matrix-synapse"
-version = "1.64.0"
+version = "1.72.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -82,8 +85,18 @@ include = [
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
+ { path = "Cargo.toml", format = "sdist" },
+ { path = "Cargo.lock", format = "sdist" },
+ { path = "rust/Cargo.toml", format = "sdist" },
+ { path = "rust/build.rs", format = "sdist" },
+ { path = "rust/src/**", format = "sdist" },
+]
+exclude = [
+ { path = "synapse/*.so", format = "sdist"}
]
+build = "build_rust.py"
+
[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
synapse_worker = "synapse.app.generic_worker:main"
@@ -126,7 +139,7 @@ pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
-bcrypt = ">=3.1.0"
+bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
@@ -152,12 +165,24 @@ typing-extensions = ">=3.10.0.1"
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
-matrix-common = "^1.2.1"
+matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version of `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
+# This is the most recent version of Pydantic available on common distros.
+pydantic = ">=1.7.4"
+
+# This is for building the rust components during "poetry install", which
+# currently ignores the `build-system.requires` directive (c.f.
+# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
+# `poetry build` do the right thing without this explicit dependency.
+#
+# This can't be a dev-dependency, as `poetry install --no-dev` would then fail;
+# the only alternative is to add it to the main list of deps (here), even
+# though it isn't needed at runtime.
+setuptools_rust = ">=1.3"
# Optional Dependencies
@@ -167,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -194,7 +219,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
@@ -202,7 +227,7 @@ jwt = ["authlib"]
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
-cache_memory = ["pympler"]
+cache-memory = ["pympler"]
test = ["parameterized", "idna"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
@@ -225,7 +250,7 @@ all = [
"pysaml2",
# oidc and jwt
"authlib",
- # url_preview
+ # url-preview
"lxml",
# sentry
"sentry-sdk",
@@ -233,7 +258,7 @@ all = [
"jaeger-client", "opentracing",
# redis
"txredisapi", "hiredis",
- # cache_memory
+ # cache-memory
"pympler",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
@@ -242,10 +267,10 @@ all = [
[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
-isort = "==5.7.0"
-black = "==22.3.0"
+isort = ">=5.10.1"
+black = ">=22.3.0"
flake8-comprehensions = "*"
-flake8-bugbear = "==21.3.2"
+flake8-bugbear = ">=21.3.2"
flake8 = "*"
# Typechecking
@@ -271,16 +296,46 @@ parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
-click = "==8.1.1"
+click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
-commonmark = "==0.9.1"
-pygithub = "==1.55"
+commonmark = ">=0.9.1"
+pygithub = ">=1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
[build-system]
-requires = ["poetry-core>=1.0.0"]
+# The upper bounds here are defensive, intended to prevent situations like
+# #13849 and #14079 where we see buildtime or runtime errors caused by build
+# system changes.
+# We are happy to raise these upper bounds upon request,
+# provided we check that it's safe to do so (i.e. that CI passes).
+requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"
+
+
+[tool.cibuildwheel]
+# Skip unsupported platforms (by us or by Rust).
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
+
+# We need a rust compiler
+before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
+environment= { PATH = "$PATH:$HOME/.cargo/bin" }
+
+# For some reason if we don't manually clean the build directory we
+# can end up polluting the next build with a .so that is for the wrong
+# Python version.
+before-build = "rm -rf {project}/build"
+build-frontend = "build"
+test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
new file mode 100644
index 0000000000..cffaa5b51b
--- /dev/null
+++ b/rust/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+# We name the package `synapse` so that things like logging have the right
+# logging target.
+name = "synapse"
+
+# Dummy version. See pyproject.toml for Synapse's version number.
+version = "0.1.0"
+
+edition = "2021"
+rust-version = "1.58.1"
+
+[lib]
+name = "synapse"
+# We generate a `cdylib` for Python and a standard `lib` for running
+# tests/benchmarks.
+crate-type = ["lib", "cdylib"]
+
+[package.metadata.maturin]
+# This is where we tell maturin where to place the built library.
+name = "synapse.synapse_rust"
+
+[dependencies]
+anyhow = "1.0.63"
+lazy_static = "1.4.0"
+log = "0.4.17"
+pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
+pyo3-log = "0.7.0"
+pythonize = "0.17.0"
+regex = "1.6.0"
+serde = { version = "1.0.144", features = ["derive"] }
+serde_json = "1.0.85"
+
+[build-dependencies]
+blake2 = "0.10.4"
+hex = "0.4.3"
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
new file mode 100644
index 0000000000..ed411461d1
--- /dev/null
+++ b/rust/benches/evaluator.rs
@@ -0,0 +1,149 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![feature(test)]
+use synapse::push::{
+ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
+};
+use test::Bencher;
+
+extern crate test;
+
+#[bench]
+fn bench_match_exact(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ 0,
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "room_id".into(),
+ pattern: Some("!room:server".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(matched, "Didn't match");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_match_word(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ 0,
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "content.body".into(),
+ pattern: Some("test".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(matched, "Didn't match");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_match_word_miss(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ 0,
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: "content.body".into(),
+ pattern: Some("foobar".into()),
+ pattern_type: None,
+ },
+ ));
+
+ let matched = eval.match_condition(&condition, None, None).unwrap();
+ assert!(!matched, "Unexpectedly matched");
+
+ b.iter(|| eval.match_condition(&condition, None, None).unwrap());
+}
+
+#[bench]
+fn bench_eval_message(b: &mut Bencher) {
+ let flattened_keys = [
+ ("type".to_string(), "m.text".to_string()),
+ ("room_id".to_string(), "!room:server".to_string()),
+ ("content.body".to_string(), "test message".to_string()),
+ ]
+ .into_iter()
+ .collect();
+
+ let eval = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ 0,
+ Default::default(),
+ Default::default(),
+ true,
+ )
+ .unwrap();
+
+ let rules =
+ FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false);
+
+ b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
+}
diff --git a/rust/benches/glob.rs b/rust/benches/glob.rs
new file mode 100644
index 0000000000..b6697d9285
--- /dev/null
+++ b/rust/benches/glob.rs
@@ -0,0 +1,40 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![feature(test)]
+
+use synapse::push::utils::{glob_to_regex, GlobMatchType};
+use test::Bencher;
+
+extern crate test;
+
+#[bench]
+fn bench_whole(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test", GlobMatchType::Whole));
+}
+
+#[bench]
+fn bench_word(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test", GlobMatchType::Word));
+}
+
+#[bench]
+fn bench_whole_wildcard_run(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
+}
+
+#[bench]
+fn bench_word_wildcard_run(b: &mut Bencher) {
+ b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
+}
diff --git a/rust/build.rs b/rust/build.rs
new file mode 100644
index 0000000000..ef370e6b41
--- /dev/null
+++ b/rust/build.rs
@@ -0,0 +1,45 @@
+//! This build script calculates the hash of all files in the `src/`
+//! directory and adds it as an environment variable at build time.
+//!
+//! This is used so that the Python code can detect when the built native module
+//! does not match the source in-tree, helping to detect the case where the
+//! source has been updated but the library hasn't been rebuilt.
+
+use std::path::PathBuf;
+
+use blake2::{Blake2b512, Digest};
+
+fn main() -> Result<(), std::io::Error> {
+ let mut dirs = vec![PathBuf::from("src")];
+
+ let mut paths = Vec::new();
+ while let Some(path) = dirs.pop() {
+ let mut entries = std::fs::read_dir(path)?
+ .map(|res| res.map(|e| e.path()))
+ .collect::<Result<Vec<_>, std::io::Error>>()?;
+
+ entries.sort();
+
+ for entry in entries {
+ if entry.is_dir() {
+ dirs.push(entry);
+ } else {
+ paths.push(entry.to_str().expect("valid rust paths").to_string());
+ }
+ }
+ }
+
+ paths.sort();
+
+ let mut hasher = Blake2b512::new();
+
+ for path in paths {
+ let bytes = std::fs::read(path)?;
+ hasher.update(bytes);
+ }
+
+ let hex_digest = hex::encode(hasher.finalize());
+ println!("cargo:rustc-env=SYNAPSE_RUST_DIGEST={hex_digest}");
+
+ Ok(())
+}
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
new file mode 100644
index 0000000000..c7b60e58a7
--- /dev/null
+++ b/rust/src/lib.rs
@@ -0,0 +1,31 @@
+use pyo3::prelude::*;
+
+pub mod push;
+
+/// Returns the hash of all the rust source files at the time it was compiled.
+///
+/// Used by Python to detect if the Rust library is outdated.
+#[pyfunction]
+fn get_rust_file_digest() -> &'static str {
+ env!("SYNAPSE_RUST_DIGEST")
+}
+
+/// Formats the sum of two numbers as string.
+#[pyfunction]
+#[pyo3(text_signature = "(a, b, /)")]
+fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
+ Ok((a + b).to_string())
+}
+
+/// The entry point for defining the Python module.
+#[pymodule]
+fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+ pyo3_log::init();
+
+ m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
+ m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
+
+ push::register_module(py, m)?;
+
+ Ok(())
+}
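Together with `rust/build.rs` above, `get_rust_file_digest` lets the Python side detect a stale native module: `build.rs` bakes a BLAKE2b-512 digest of everything under `rust/src` into the extension, and Python can recompute the digest over the working tree and compare. The sketch below illustrates the idea; the walk-and-sort order mirrors `build.rs`, but `compute_checkout_digest` and `rust_module_is_stale` are illustrative names, not necessarily what Synapse itself ships.

```python
# Hypothetical staleness check: recompute the digest that rust/build.rs baked
# into the extension and compare it against the in-tree sources. Only
# get_rust_file_digest comes from the diff above; the rest is illustrative.
import hashlib
import os

from synapse.synapse_rust import get_rust_file_digest


def compute_checkout_digest(rust_dir: str) -> str:
    """Hash every file under rust/src in sorted path order, as build.rs does."""
    paths = []
    for root, _dirs, files in os.walk(os.path.join(rust_dir, "src")):
        for name in files:
            paths.append(os.path.join(root, name))
    paths.sort()

    hasher = hashlib.blake2b(digest_size=64)  # BLAKE2b-512, matching Blake2b512
    for path in paths:
        with open(path, "rb") as f:
            hasher.update(f.read())
    return hasher.hexdigest()


def rust_module_is_stale(rust_dir: str) -> bool:
    """True if the checked-out Rust sources no longer match the built module."""
    return compute_checkout_digest(rust_dir) != get_rust_file_digest()
```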
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
new file mode 100644
index 0000000000..49802fa4eb
--- /dev/null
+++ b/rust/src/push/base_rules.rs
@@ -0,0 +1,340 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Contains the definitions of the "base" push rules.
+
+use std::borrow::Cow;
+use std::collections::HashMap;
+
+use lazy_static::lazy_static;
+use serde_json::Value;
+
+use super::KnownCondition;
+use crate::push::Action;
+use crate::push::Condition;
+use crate::push::EventMatchCondition;
+use crate::push::PushRule;
+use crate::push::RelatedEventMatchCondition;
+use crate::push::SetTweak;
+use crate::push::TweakValue;
+
+const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
+ set_tweak: Cow::Borrowed("highlight"),
+ value: None,
+ other_keys: Value::Null,
+});
+
+const HIGHLIGHT_FALSE_ACTION: Action = Action::SetTweak(SetTweak {
+ set_tweak: Cow::Borrowed("highlight"),
+ value: Some(TweakValue::Other(Value::Bool(false))),
+ other_keys: Value::Null,
+});
+
+const SOUND_ACTION: Action = Action::SetTweak(SetTweak {
+ set_tweak: Cow::Borrowed("sound"),
+ value: Some(TweakValue::String(Cow::Borrowed("default"))),
+ other_keys: Value::Null,
+});
+
+const RING_ACTION: Action = Action::SetTweak(SetTweak {
+ set_tweak: Cow::Borrowed("sound"),
+ value: Some(TweakValue::String(Cow::Borrowed("ring"))),
+ other_keys: Value::Null,
+});
+
+pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.master"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[]),
+ actions: Cow::Borrowed(&[Action::DontNotify]),
+ default: true,
+ default_enabled: false,
+}];
+
+pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("content.msgtype"),
+ pattern: Some(Cow::Borrowed("m.notice")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::DontNotify]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.invite_for_me"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.member")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("content.membership"),
+ pattern: Some(Cow::Borrowed("invite")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("state_key"),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_id")),
+ })),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.member_event"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.member")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::DontNotify]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
+ RelatedEventMatchCondition {
+ key: Some(Cow::Borrowed("sender")),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_id")),
+ rel_type: Cow::Borrowed("m.in_reply_to"),
+ include_fallbacks: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::ContainsDisplayName)]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::SenderNotificationPermission {
+ key: Cow::Borrowed("room"),
+ }),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("content.body"),
+ pattern: Some(Cow::Borrowed("@room")),
+ pattern_type: None,
+ })),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.tombstone"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.tombstone")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("state_key"),
+ pattern: Some(Cow::Borrowed("")),
+ pattern_type: None,
+ })),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.reaction"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.reaction")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::DontNotify]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.m.rule.room.server_acl"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.server_acl")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("state_key"),
+ pattern: Some(Cow::Borrowed("")),
+ pattern_type: None,
+ })),
+ ]),
+ actions: Cow::Borrowed(&[]),
+ default: true,
+ default_enabled: true,
+ },
+];
+
+pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
+ rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
+ priority_class: 4,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("content.body"),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_localpart")),
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+}];
+
+pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.call.invite")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.message")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.encrypted")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.message")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
+ EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("m.room.encrypted")),
+ pattern_type: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("content.type"),
+ pattern: Some(Cow::Borrowed("jitsi")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("state_key"),
+ pattern: Some(Cow::Borrowed("*")),
+ pattern_type: None,
+ })),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+];
+
+lazy_static! {
+ pub static ref BASE_RULES_BY_ID: HashMap<&'static str, &'static PushRule> =
+ BASE_PREPEND_OVERRIDE_RULES
+ .iter()
+ .chain(BASE_APPEND_OVERRIDE_RULES.iter())
+ .chain(BASE_APPEND_CONTENT_RULES.iter())
+ .chain(BASE_APPEND_UNDERRIDE_RULES.iter())
+ .map(|rule| { (&*rule.rule_id, rule) })
+ .collect();
+}
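Each `PushRule` above is the in-memory form of a default push rule (most are the spec-defined `.m.rule.*` defaults, plus a few vendor-prefixed extras). For orientation, the `.m.rule.tombstone` override rule defined above corresponds roughly to the following client-visible representation, written here as a Python literal; the exact serialisation is handled elsewhere and may differ in detail (for example, the internal rule ID carries a `global/override/` prefix that clients never see).

```python
# Approximate client-visible form of the .m.rule.tombstone rule defined above.
# Omitting "value" on the highlight tweak means it defaults to true.
tombstone_rule = {
    "rule_id": ".m.rule.tombstone",
    "default": True,
    "enabled": True,
    "conditions": [
        {"kind": "event_match", "key": "type", "pattern": "m.room.tombstone"},
        {"kind": "event_match", "key": "state_key", "pattern": ""},
    ],
    "actions": ["notify", {"set_tweak": "highlight"}],
}
```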
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
new file mode 100644
index 0000000000..cedd42c54d
--- /dev/null
+++ b/rust/src/push/evaluator.rs
@@ -0,0 +1,370 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+
+use anyhow::{Context, Error};
+use lazy_static::lazy_static;
+use log::warn;
+use pyo3::prelude::*;
+use regex::Regex;
+
+use super::{
+ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
+ Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
+ RelatedEventMatchCondition,
+};
+
+lazy_static! {
+ /// Used to parse the `is` clause in the room member count condition.
+ static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
+}
+
+/// Allows running a set of push rules against a particular event.
+#[pyclass]
+pub struct PushRuleEvaluator {
+ /// A mapping of "flattened" keys to string values in the event, e.g.
+ /// includes things like "type" and "content.msgtype".
+ flattened_keys: BTreeMap<String, String>,
+
+ /// The "content.body", if any.
+ body: String,
+
+ /// The number of users in the room.
+ room_member_count: u64,
+
+ /// The `notifications` section of the current power levels in the room.
+ notification_power_levels: BTreeMap<String, i64>,
+
+ /// The power level of the sender of the event, or None if the event is an
+ /// outlier.
+ sender_power_level: Option<i64>,
+
+ /// The related events, indexed by relation type. Flattened in the same manner as
+ /// `flattened_keys`.
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+
+ /// Whether MSC3664 (push rules for related events) is enabled.
+ related_event_match_enabled: bool,
+}
+
+#[pymethods]
+impl PushRuleEvaluator {
+ /// Create a new `PushRuleEvaluator`. See struct docstring for details.
+ #[new]
+ pub fn py_new(
+ flattened_keys: BTreeMap<String, String>,
+ room_member_count: u64,
+ sender_power_level: Option<i64>,
+ notification_power_levels: BTreeMap<String, i64>,
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+ related_event_match_enabled: bool,
+ ) -> Result<Self, Error> {
+ let body = flattened_keys
+ .get("content.body")
+ .cloned()
+ .unwrap_or_default();
+
+ Ok(PushRuleEvaluator {
+ flattened_keys,
+ body,
+ room_member_count,
+ notification_power_levels,
+ sender_power_level,
+ related_events_flattened,
+ related_event_match_enabled,
+ })
+ }
+
+ /// Run the evaluator with the given push rules, for the given user ID and
+ /// display name of the user.
+ ///
+ /// Passing in None will skip evaluating rules matching user ID and display
+ /// name.
+ ///
+ /// Returns the set of actions, if any, that match (filtering out any
+ /// `dont_notify` actions).
+ pub fn run(
+ &self,
+ push_rules: &FilteredPushRules,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> Vec<Action> {
+ 'outer: for (push_rule, enabled) in push_rules.iter() {
+ if !enabled {
+ continue;
+ }
+
+ for condition in push_rule.conditions.iter() {
+ match self.match_condition(condition, user_id, display_name) {
+ Ok(true) => {}
+ Ok(false) => continue 'outer,
+ Err(err) => {
+ warn!("Condition match failed {err}");
+ continue 'outer;
+ }
+ }
+ }
+
+ let actions = push_rule
+ .actions
+ .iter()
+ // Filter out "dont_notify" actions, as we don't store them.
+ .filter(|a| **a != Action::DontNotify)
+ .cloned()
+ .collect();
+
+ return actions;
+ }
+
+ Vec::new()
+ }
+
+ /// Check if the given condition matches.
+ fn matches(
+ &self,
+ condition: Condition,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> bool {
+ match self.match_condition(&condition, user_id, display_name) {
+ Ok(true) => true,
+ Ok(false) => false,
+ Err(err) => {
+ warn!("Condition match failed {err}");
+ false
+ }
+ }
+ }
+}
+
+impl PushRuleEvaluator {
+ /// Match a given `Condition` for a push rule.
+ pub fn match_condition(
+ &self,
+ condition: &Condition,
+ user_id: Option<&str>,
+ display_name: Option<&str>,
+ ) -> Result<bool, Error> {
+ let known_condition = match condition {
+ Condition::Known(known) => known,
+ Condition::Unknown(_) => {
+ return Ok(false);
+ }
+ };
+
+ let result = match known_condition {
+ KnownCondition::EventMatch(event_match) => {
+ self.match_event_match(event_match, user_id)?
+ }
+ KnownCondition::RelatedEventMatch(event_match) => {
+ self.match_related_event_match(event_match, user_id)?
+ }
+ KnownCondition::ContainsDisplayName => {
+ if let Some(dn) = display_name {
+ if !dn.is_empty() {
+ get_glob_matcher(dn, GlobMatchType::Word)?.is_match(&self.body)?
+ } else {
+ // We specifically ignore empty display names, as otherwise
+ // they would always match.
+ false
+ }
+ } else {
+ false
+ }
+ }
+ KnownCondition::RoomMemberCount { is } => {
+ if let Some(is) = is {
+ self.match_member_count(is)?
+ } else {
+ false
+ }
+ }
+ KnownCondition::SenderNotificationPermission { key } => {
+ if let Some(sender_power_level) = &self.sender_power_level {
+ let required_level = self
+ .notification_power_levels
+ .get(key.as_ref())
+ .copied()
+ .unwrap_or(50);
+
+ *sender_power_level >= required_level
+ } else {
+ false
+ }
+ }
+ };
+
+ Ok(result)
+ }
+
+ /// Evaluates a `event_match` condition.
+ fn match_event_match(
+ &self,
+ event_match: &EventMatchCondition,
+ user_id: Option<&str>,
+ ) -> Result<bool, Error> {
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
+ }
+ } else {
+ return Ok(false);
+ };
+
+ let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
+ haystack
+ } else {
+ return Ok(false);
+ };
+
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if event_match.key == "content.body" {
+ GlobMatchType::Word
+ } else {
+ GlobMatchType::Whole
+ };
+
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
+ }
+
+ /// Evaluates a `related_event_match` condition. (MSC3664)
+ fn match_related_event_match(
+ &self,
+ event_match: &RelatedEventMatchCondition,
+ user_id: Option<&str>,
+ ) -> Result<bool, Error> {
+ // First check if related event matching is enabled...
+ if !self.related_event_match_enabled {
+ return Ok(false);
+ }
+
+ // Get the related event; fail if there is none.
+ let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
+ event
+ } else {
+ return Ok(false);
+ };
+
+        // If we are not matching fallbacks, don't match if the event carries the
+        // special key indicating that it is a fallback relation.
+ if !event_match.include_fallbacks.unwrap_or(false)
+ && event.contains_key("im.vector.is_falling_back")
+ {
+ return Ok(false);
+ }
+
+        // If there is no key to match against, then the mere existence of the
+        // related event is enough for the condition to match.
+ let key = if let Some(key) = &event_match.key {
+ key
+ } else {
+ return Ok(true);
+ };
+
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
+ }
+ } else {
+ return Ok(false);
+ };
+
+ let haystack = if let Some(haystack) = event.get(&**key) {
+ haystack
+ } else {
+ return Ok(false);
+ };
+
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if key == "content.body" {
+ GlobMatchType::Word
+ } else {
+ GlobMatchType::Whole
+ };
+
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
+ }
+
+ /// Match the member count against an 'is' condition
+ /// The `is` condition can be things like '>2', '==3' or even just '4'.
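+    ///
+    /// e.g. (a rough sketch of the accepted forms) `">=10"` matches when the room
+    /// has at least ten members, and a bare `"4"` matches exactly four.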
+ fn match_member_count(&self, is: &str) -> Result<bool, Error> {
+ let captures = INEQUALITY_EXPR.captures(is).context("bad 'is' clause")?;
+ let ineq = captures.get(1).map_or("==", |m| m.as_str());
+ let rhs: u64 = captures
+ .get(2)
+ .context("missing number")?
+ .as_str()
+ .parse()?;
+
+ let matches = match ineq {
+ "" | "==" => self.room_member_count == rhs,
+ "<" => self.room_member_count < rhs,
+ ">" => self.room_member_count > rhs,
+ ">=" => self.room_member_count >= rhs,
+ "<=" => self.room_member_count <= rhs,
+ _ => false,
+ };
+
+ Ok(matches)
+ }
+}
+
+#[test]
+fn push_rule_evaluator() {
+ let mut flattened_keys = BTreeMap::new();
+ flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
+ let evaluator = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ BTreeMap::new(),
+ BTreeMap::new(),
+ true,
+ )
+ .unwrap();
+
+ let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
+ assert_eq!(result.len(), 3);
+}
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
new file mode 100644
index 0000000000..d57800aa4a
--- /dev/null
+++ b/rust/src/push/mod.rs
@@ -0,0 +1,522 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! An implementation of Matrix push rules.
+//!
+//! The `Cow<_>` type is used extensively within this module to allow creating
+//! the base rules as constants (at the time of writing, Rust constants cannot
+//! require explicit allocation).
+//!
+//! ---
+//!
+//! Push rules are the mechanism used to determine which events trigger a push (and a
+//! bump in notification counts).
+//!
+//! This consists of a list of "push rules" for each user, where a push rule is a
+//! pair of "conditions" and "actions". When a user receives an event Synapse
+//! iterates over the list of push rules until it finds one where all the conditions
+//! match the event, at which point "actions" describe the outcome (e.g. notify,
+//! highlight, etc).
+//!
+//! Push rules are split up into 5 different "kinds" (aka "priority classes"), which
+//! are run in order:
+//! 1. Override — highest priority rules, e.g. always ignore notices
+//! 2. Content — content specific rules, e.g. @ notifications
+//! 3. Room — per room rules, e.g. enable/disable notifications for all messages
+//! in a room
+//! 4. Sender — per sender rules, e.g. never notify for messages from a given
+//! user
+//! 5. Underride — the lowest priority "default" rules, e.g. notify for every
+//! message.
+//!
+//! The set of "base rules" are the list of rules that every user has by default. A
+//! user can modify their copy of the push rules in one of three ways:
+//! 1. Adding a new push rule of a certain kind
+//! 2. Changing the actions of a base rule
+//! 3. Enabling/disabling a base rule.
+//!
+//! The base rules are split into whether they come before or after a particular
+//! kind, so the order of push rule evaluation would be: base rules for before
+//! "override" kind, user defined "override" rules, base rules after "override"
+//! kind, etc, etc.
+
+use std::borrow::Cow;
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+use anyhow::{Context, Error};
+use log::warn;
+use pyo3::prelude::*;
+use pythonize::{depythonize, pythonize};
+use serde::de::Error as _;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use self::evaluator::PushRuleEvaluator;
+
+mod base_rules;
+pub mod evaluator;
+pub mod utils;
+
+/// Called when registering modules with python.
+pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+ let child_module = PyModule::new(py, "push")?;
+ child_module.add_class::<PushRule>()?;
+ child_module.add_class::<PushRules>()?;
+ child_module.add_class::<FilteredPushRules>()?;
+ child_module.add_class::<PushRuleEvaluator>()?;
+ child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
+
+ m.add_submodule(child_module)?;
+
+ // We need to manually add the module to sys.modules to make `from
+ // synapse.synapse_rust import push` work.
+ py.import("sys")?
+ .getattr("modules")?
+ .set_item("synapse.synapse_rust.push", child_module)?;
+
+ Ok(())
+}
+
+#[pyfunction]
+fn get_base_rule_ids() -> HashSet<&'static str> {
+ base_rules::BASE_RULES_BY_ID.keys().copied().collect()
+}
+
+/// A single push rule for a user.
+#[derive(Debug, Clone)]
+#[pyclass(frozen)]
+pub struct PushRule {
+ /// A unique ID for this rule
+ pub rule_id: Cow<'static, str>,
+ /// The "kind" of push rule this is (see `PRIORITY_CLASS_MAP` in Python)
+ #[pyo3(get)]
+ pub priority_class: i32,
+ /// The conditions that must all match for actions to be applied
+ pub conditions: Cow<'static, [Condition]>,
+ /// The actions to apply if all conditions are met
+ pub actions: Cow<'static, [Action]>,
+ /// Whether this is a base rule
+ #[pyo3(get)]
+ pub default: bool,
+ /// Whether this is enabled by default
+ #[pyo3(get)]
+ pub default_enabled: bool,
+}
+
+#[pymethods]
+impl PushRule {
+ #[staticmethod]
+ pub fn from_db(
+ rule_id: String,
+ priority_class: i32,
+ conditions: &str,
+ actions: &str,
+ ) -> Result<PushRule, Error> {
+ let conditions = serde_json::from_str(conditions).context("parsing conditions")?;
+ let actions = serde_json::from_str(actions).context("parsing actions")?;
+
+ Ok(PushRule {
+ rule_id: Cow::Owned(rule_id),
+ priority_class,
+ conditions,
+ actions,
+ default: false,
+ default_enabled: true,
+ })
+ }
+
+ #[getter]
+ fn rule_id(&self) -> &str {
+ &self.rule_id
+ }
+
+ #[getter]
+ fn actions(&self) -> Vec<Action> {
+ self.actions.clone().into_owned()
+ }
+
+ #[getter]
+ fn conditions(&self) -> Vec<Condition> {
+ self.conditions.clone().into_owned()
+ }
+
+ fn __repr__(&self) -> String {
+ format!(
+ "<PushRule rule_id={}, conditions={:?}, actions={:?}>",
+ self.rule_id, self.conditions, self.actions
+ )
+ }
+}
+
+/// The "action" Synapse should perform for a matching push rule.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Action {
+ DontNotify,
+ Notify,
+ Coalesce,
+ SetTweak(SetTweak),
+
+ // An unrecognized custom action.
+ Unknown(Value),
+}
+
+impl IntoPy<PyObject> for Action {
+ fn into_py(self, py: Python<'_>) -> PyObject {
+        // When we pass an `Action` to Python we want it to be converted
+ // to a dict. We use `pythonize`, which converts the struct using the
+ // `serde` serialization.
+ pythonize(py, &self).expect("valid action")
+ }
+}
+
+/// The body of a `SetTweak` push action.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+pub struct SetTweak {
+ set_tweak: Cow<'static, str>,
+
+ #[serde(skip_serializing_if = "Option::is_none")]
+ value: Option<TweakValue>,
+
+ // This picks up any other fields that may have been added by clients.
+ // These get added when we convert the `Action` to a python object.
+ #[serde(flatten)]
+ other_keys: Value,
+}
+
+/// The value of a `set_tweak`.
+///
+/// We need this (rather than using `serde_json::Value` directly) so that we can
+/// use `&'static str` in the value when defining the constant base rules.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(untagged)]
+pub enum TweakValue {
+ String(Cow<'static, str>),
+ Other(Value),
+}
+
+impl Serialize for Action {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ match self {
+ Action::DontNotify => serializer.serialize_str("dont_notify"),
+ Action::Notify => serializer.serialize_str("notify"),
+ Action::Coalesce => serializer.serialize_str("coalesce"),
+ Action::SetTweak(tweak) => tweak.serialize(serializer),
+ Action::Unknown(value) => value.serialize(serializer),
+ }
+ }
+}
+
+/// Simple helper type for deserializing an `Action` from JSON.
+#[derive(Deserialize)]
+#[serde(untagged)]
+enum ActionDeserializeHelper {
+ Str(String),
+ SetTweak(SetTweak),
+ Unknown(Value),
+}
+
+impl<'de> Deserialize<'de> for Action {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let helper: ActionDeserializeHelper = Deserialize::deserialize(deserializer)?;
+ match helper {
+ ActionDeserializeHelper::Str(s) => match &*s {
+ "dont_notify" => Ok(Action::DontNotify),
+ "notify" => Ok(Action::Notify),
+ "coalesce" => Ok(Action::Coalesce),
+ _ => Err(D::Error::custom("unrecognized action")),
+ },
+ ActionDeserializeHelper::SetTweak(set_tweak) => Ok(Action::SetTweak(set_tweak)),
+ ActionDeserializeHelper::Unknown(value) => Ok(Action::Unknown(value)),
+ }
+ }
+}
+
+/// A condition used in push rules to match against an event.
+///
+/// We need this split as `serde` doesn't give us the ability to have a
+/// "catchall" variant in tagged enums.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(untagged)]
+pub enum Condition {
+ /// A recognized condition that we can match against
+ Known(KnownCondition),
+ /// An unrecognized condition that we ignore.
+ Unknown(Value),
+}
+
+/// The set of "known" conditions that we can handle.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "snake_case")]
+#[serde(tag = "kind")]
+pub enum KnownCondition {
+ EventMatch(EventMatchCondition),
+ #[serde(rename = "im.nheko.msc3664.related_event_match")]
+ RelatedEventMatch(RelatedEventMatchCondition),
+ ContainsDisplayName,
+ RoomMemberCount {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ is: Option<Cow<'static, str>>,
+ },
+ SenderNotificationPermission {
+ key: Cow<'static, str>,
+ },
+}
+
+impl IntoPy<PyObject> for Condition {
+ fn into_py(self, py: Python<'_>) -> PyObject {
+ pythonize(py, &self).expect("valid condition")
+ }
+}
+
+impl<'source> FromPyObject<'source> for Condition {
+ fn extract(ob: &'source PyAny) -> PyResult<Self> {
+ Ok(depythonize(ob)?)
+ }
+}
+
+/// The body of a [`KnownCondition::EventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct EventMatchCondition {
+ pub key: Cow<'static, str>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern_type: Option<Cow<'static, str>>,
+}
+
+/// The body of a [`KnownCondition::RelatedEventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct RelatedEventMatchCondition {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub key: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern_type: Option<Cow<'static, str>>,
+ pub rel_type: Cow<'static, str>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub include_fallbacks: Option<bool>,
+}
+
+/// The collection of push rules for a user.
+#[derive(Debug, Clone, Default)]
+#[pyclass(frozen)]
+pub struct PushRules {
+ /// Custom push rules that override a base rule.
+ overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,
+
+ /// Custom rules that come between the prepend/append override base rules.
+ override_rules: Vec<PushRule>,
+ /// Custom rules that come before the base content rules.
+ content: Vec<PushRule>,
+ /// Custom rules that come before the base room rules.
+ room: Vec<PushRule>,
+ /// Custom rules that come before the base sender rules.
+ sender: Vec<PushRule>,
+ /// Custom rules that come before the base underride rules.
+ underride: Vec<PushRule>,
+}
+
+#[pymethods]
+impl PushRules {
+ #[new]
+ pub fn new(rules: Vec<PushRule>) -> PushRules {
+ let mut push_rules: PushRules = Default::default();
+
+ for rule in rules {
+ if let Some(&o) = base_rules::BASE_RULES_BY_ID.get(&*rule.rule_id) {
+ push_rules.overridden_base_rules.insert(
+ rule.rule_id.clone(),
+ PushRule {
+ actions: rule.actions.clone(),
+ ..o.clone()
+ },
+ );
+
+ continue;
+ }
+
+ match rule.priority_class {
+ 5 => push_rules.override_rules.push(rule),
+ 4 => push_rules.content.push(rule),
+ 3 => push_rules.room.push(rule),
+ 2 => push_rules.sender.push(rule),
+ 1 => push_rules.underride.push(rule),
+ _ => {
+ warn!(
+ "Unrecognized priority class for rule {}: {}",
+ rule.rule_id, rule.priority_class
+ );
+ }
+ }
+ }
+
+ push_rules
+ }
+
+ /// Returns the list of all rules, including base rules, in the order they
+    /// should be executed.
+ fn rules(&self) -> Vec<PushRule> {
+ self.iter().cloned().collect()
+ }
+}
+
+impl PushRules {
+ /// Iterates over all the rules, including base rules, in the order they
+    /// should be executed.
+ pub fn iter(&self) -> impl Iterator<Item = &PushRule> {
+ base_rules::BASE_PREPEND_OVERRIDE_RULES
+ .iter()
+ .chain(self.override_rules.iter())
+ .chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
+ .chain(self.content.iter())
+ .chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
+ .chain(self.room.iter())
+ .chain(self.sender.iter())
+ .chain(self.underride.iter())
+ .chain(base_rules::BASE_APPEND_UNDERRIDE_RULES.iter())
+ .map(|rule| {
+ self.overridden_base_rules
+ .get(&*rule.rule_id)
+ .unwrap_or(rule)
+ })
+ }
+}
+
+/// A wrapper around `PushRules` that checks the enabled state of rules and
+/// filters out disabled experimental rules.
+#[derive(Debug, Clone, Default)]
+#[pyclass(frozen)]
+pub struct FilteredPushRules {
+ push_rules: PushRules,
+ enabled_map: BTreeMap<String, bool>,
+ msc3664_enabled: bool,
+}
+
+#[pymethods]
+impl FilteredPushRules {
+ #[new]
+ pub fn py_new(
+ push_rules: PushRules,
+ enabled_map: BTreeMap<String, bool>,
+ msc3664_enabled: bool,
+ ) -> Self {
+ Self {
+ push_rules,
+ enabled_map,
+ msc3664_enabled,
+ }
+ }
+
+ /// Returns the list of all rules and their enabled state, including base
+ /// rules, in the order they should be executed in.
+ fn rules(&self) -> Vec<(PushRule, bool)> {
+ self.iter().map(|(r, e)| (r.clone(), e)).collect()
+ }
+}
+
+impl FilteredPushRules {
+ /// Iterates over all the rules and their enabled state, including base
+ /// rules, in the order they should be executed in.
+ fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
+ self.push_rules
+ .iter()
+ .filter(|rule| {
+ // Ignore disabled experimental push rules
+ if !self.msc3664_enabled
+ && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
+ {
+ return false;
+ }
+
+ true
+ })
+ .map(|r| {
+ let enabled = *self
+ .enabled_map
+ .get(&*r.rule_id)
+ .unwrap_or(&r.default_enabled);
+ (r, enabled)
+ })
+ }
+}
+
+#[test]
+fn test_serialize_condition() {
+ let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: "content.body".into(),
+ pattern: Some("coffee".into()),
+ pattern_type: None,
+ }));
+
+ let json = serde_json::to_string(&condition).unwrap();
+ assert_eq!(
+ json,
+ r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#
+ )
+}
+
+#[test]
+fn test_deserialize_condition() {
+ let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
+
+ let _: Condition = serde_json::from_str(json).unwrap();
+}
+
+#[test]
+fn test_deserialize_unstable_msc3664_condition() {
+ let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RelatedEventMatch(_))
+ ));
+}
+
+#[test]
+fn test_deserialize_custom_condition() {
+ let json = r#"{"kind":"custom_tag"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(condition, Condition::Unknown(_)));
+
+ let new_json = serde_json::to_string(&condition).unwrap();
+ assert_eq!(json, new_json);
+}
+
+#[test]
+fn test_deserialize_action() {
+ let _: Action = serde_json::from_str(r#""notify""#).unwrap();
+ let _: Action = serde_json::from_str(r#""dont_notify""#).unwrap();
+ let _: Action = serde_json::from_str(r#""coalesce""#).unwrap();
+ let _: Action = serde_json::from_str(r#"{"set_tweak": "highlight"}"#).unwrap();
+}
+
+#[test]
+fn test_custom_action() {
+ let json = r#"{"some_custom":"action_fields"}"#;
+
+ let action: Action = serde_json::from_str(json).unwrap();
+ assert!(matches!(action, Action::Unknown(_)));
+
+ let new_json = serde_json::to_string(&action).unwrap();
+ assert_eq!(json, new_json);
+}
diff --git a/rust/src/push/utils.rs b/rust/src/push/utils.rs
new file mode 100644
index 0000000000..8759340473
--- /dev/null
+++ b/rust/src/push/utils.rs
@@ -0,0 +1,215 @@
+// Copyright 2022 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use anyhow::bail;
+use anyhow::Context;
+use anyhow::Error;
+use lazy_static::lazy_static;
+use regex;
+use regex::Regex;
+use regex::RegexBuilder;
+
+lazy_static! {
+ /// Matches runs of non-wildcard characters followed by wildcard characters.
+ static ref WILDCARD_RUN: Regex = Regex::new(r"([^\?\*]*)([\?\*]*)").expect("valid regex");
+}
+
+/// Extract the localpart from a Matrix style ID
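+///
+/// e.g. `get_localpart_from_id("@alice:example.com")` is expected to return
+/// `Ok("alice")` (a sketch; see the test at the bottom of this file).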
+pub(crate) fn get_localpart_from_id(id: &str) -> Result<&str, Error> {
+ let (localpart, _) = id
+ .split_once(':')
+ .with_context(|| format!("ID does not contain colon: {id}"))?;
+
+    // We need to strip off the first character, which is the sigil indicating the type of ID.
+ if localpart.is_empty() {
+ bail!("Invalid ID {id}");
+ }
+
+ Ok(&localpart[1..])
+}
+
+/// Used by `glob_to_regex` to specify what to match the regex against.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum GlobMatchType {
+ /// The generated regex will match against the entire input.
+ Whole,
+ /// The generated regex will match against words.
+ Word,
+}
+
+/// Convert a "glob" style expression to a regex, anchoring either to the entire
+/// input or to individual words.
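+///
+/// A rough sketch of the expected output (see also the test at the bottom of
+/// this file):
+///
+/// ```ignore
+/// let re = glob_to_regex("foo*bar?", GlobMatchType::Whole)?;
+/// assert_eq!(re.as_str(), r"\Afoo.{0,}bar.{1}\z");
+/// ```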
+pub fn glob_to_regex(glob: &str, match_type: GlobMatchType) -> Result<Regex, Error> {
+ let mut chunks = Vec::new();
+
+ // Patterns with wildcards must be simplified to avoid performance cliffs
+ // - The glob `?**?**?` is equivalent to the glob `???*`
+ // - The glob `???*` is equivalent to the regex `.{3,}`
+ for captures in WILDCARD_RUN.captures_iter(glob) {
+ if let Some(chunk) = captures.get(1) {
+ chunks.push(regex::escape(chunk.as_str()));
+ }
+
+ if let Some(wildcards) = captures.get(2) {
+ if wildcards.as_str() == "" {
+ continue;
+ }
+
+ let question_marks = wildcards.as_str().chars().filter(|c| *c == '?').count();
+
+ if wildcards.as_str().contains('*') {
+ chunks.push(format!(".{{{question_marks},}}"));
+ } else {
+ chunks.push(format!(".{{{question_marks}}}"));
+ }
+ }
+ }
+
+ let joined = chunks.join("");
+
+ let regex_str = match match_type {
+ GlobMatchType::Whole => format!(r"\A{joined}\z"),
+
+ // `^|\W` and `\W|$` handle the case where `pattern` starts or ends with a non-word
+ // character.
+ GlobMatchType::Word => format!(r"(?:^|\b|\W){joined}(?:\b|\W|$)"),
+ };
+
+    Ok(RegexBuilder::new(&regex_str)
+ .case_insensitive(true)
+ .build()?)
+}
+
+/// Compiles the glob into a `Matcher`.
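+///
+/// A sketch of the intended usage (the push rule evaluator calls this in
+/// roughly this way):
+///
+/// ```ignore
+/// let mut matcher = get_glob_matcher("hello", GlobMatchType::Word)?;
+/// assert!(matcher.is_match("Why, hello there!")?);
+/// ```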
+pub fn get_glob_matcher(glob: &str, match_type: GlobMatchType) -> Result<Matcher, Error> {
+ // There are a number of shortcuts we can make if the glob doesn't contain a
+ // wild card.
+ let matcher = if glob.contains(['*', '?']) {
+ let regex = glob_to_regex(glob, match_type)?;
+ Matcher::Regex(regex)
+ } else if match_type == GlobMatchType::Whole {
+ // If there aren't any wildcards and we're matching the whole thing,
+        // then we can simply do a case-insensitive string match.
+ Matcher::Whole(glob.to_lowercase())
+ } else {
+        // Otherwise, if we're matching against words then we can first check
+ // if the haystack contains the glob at all.
+ Matcher::Word {
+ word: glob.to_lowercase(),
+ regex: None,
+ }
+ };
+
+ Ok(matcher)
+}
+
+/// Matches against a glob
+pub enum Matcher {
+ /// Plain regex matching.
+ Regex(Regex),
+
+ /// Case-insensitive equality.
+ Whole(String),
+
+    /// Word matching. `regex` is a cache of calling [`glob_to_regex`] on `word`.
+ Word { word: String, regex: Option<Regex> },
+}
+
+impl Matcher {
+ /// Checks if the glob matches the given haystack.
+ pub fn is_match(&mut self, haystack: &str) -> Result<bool, Error> {
+        // We want to do case-insensitive matching, so we convert to
+ // lowercase first.
+ let haystack = haystack.to_lowercase();
+
+ match self {
+ Matcher::Regex(regex) => Ok(regex.is_match(&haystack)),
+ Matcher::Whole(whole) => Ok(whole == &haystack),
+ Matcher::Word { word, regex } => {
+ // If we're looking for a literal word, then we first check if
+ // the haystack contains the word as a substring.
+ if !haystack.contains(&*word) {
+ return Ok(false);
+ }
+
+ // If it does contain the word as a substring, then we need to
+ // check if it is an actual word by testing it against the regex.
+ let regex = if let Some(regex) = regex {
+ regex
+ } else {
+ let compiled_regex = glob_to_regex(word, GlobMatchType::Word)?;
+ regex.insert(compiled_regex)
+ };
+
+ Ok(regex.is_match(&haystack))
+ }
+ }
+ }
+}
+
+#[test]
+fn test_get_localpart_from_id() {
+ get_localpart_from_id("").unwrap_err();
+ get_localpart_from_id(":").unwrap_err();
+ get_localpart_from_id(":asd").unwrap_err();
+ get_localpart_from_id("::as::asad").unwrap_err();
+
+ assert_eq!(get_localpart_from_id("@test:foo").unwrap(), "test");
+ assert_eq!(get_localpart_from_id("@:").unwrap(), "");
+ assert_eq!(get_localpart_from_id("@test:foo:907").unwrap(), "test");
+}
+
+#[test]
+fn test_glob() -> Result<(), Error> {
+ assert_eq!(
+ glob_to_regex("simple", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple*", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{0,}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple?", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{1}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple?*?*", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{2,}\z"
+ );
+ assert_eq!(
+ glob_to_regex("simple???", GlobMatchType::Whole)?.as_str(),
+ r"\Asimple.{3}\z"
+ );
+
+ assert_eq!(
+ glob_to_regex("escape.", GlobMatchType::Whole)?.as_str(),
+ r"\Aescape\.\z"
+ );
+
+ assert!(glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simple"));
+ assert!(!glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple?", GlobMatchType::Whole)?.is_match("simples"));
+ assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simple"));
+
+ assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("some simple."));
+ assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("simple"));
+ assert!(!glob_to_regex("simple", GlobMatchType::Word)?.is_match("simples"));
+
+ assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("Some @user:foo test"));
+ assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("@user:foo"));
+
+ Ok(())
+}
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index cd2e64b75f..7442300196 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -27,6 +27,7 @@ DISTS = (
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
+ "ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
)
DESC = """\
diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py
new file mode 100755
index 0000000000..9f2b7ded5b
--- /dev/null
+++ b/scripts-dev/check_pydantic_models.py
@@ -0,0 +1,424 @@
+#! /usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+A script which enforces that Synapse always uses strict types when defining a Pydantic
+model.
+
+Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. See
+
+ https://github.com/pydantic/pydantic/issues/1098
+ https://pydantic-docs.helpmanual.io/blog/pydantic-v2/#strict-mode
+
+Until then, this script is a best effort to stop us from introducing type coercion bugs
+(like the infamous stringy power levels fixed in room version 10).
+"""
+import argparse
+import contextlib
+import functools
+import importlib
+import logging
+import os
+import pkgutil
+import sys
+import textwrap
+import traceback
+import unittest.mock
+from contextlib import contextmanager
+from typing import Any, Callable, Dict, Generator, List, Set, Type, TypeVar
+
+from parameterized import parameterized
+from pydantic import BaseModel as PydanticBaseModel, conbytes, confloat, conint, constr
+from pydantic.typing import get_args
+from typing_extensions import ParamSpec
+
+logger = logging.getLogger(__name__)
+
+CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [
+ constr,
+ conbytes,
+ conint,
+ confloat,
+]
+
+TYPES_THAT_PYDANTIC_WILL_COERCE_TO = [
+ str,
+ bytes,
+ int,
+ float,
+ bool,
+]
+
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+class ModelCheckerException(Exception):
+ """Dummy exception. Allows us to detect unwanted types during a module import."""
+
+
+class MissingStrictInConstrainedTypeException(ModelCheckerException):
+ factory_name: str
+
+ def __init__(self, factory_name: str):
+ self.factory_name = factory_name
+
+
+class FieldHasUnwantedTypeException(ModelCheckerException):
+ message: str
+
+ def __init__(self, message: str):
+ self.message = message
+
+
+def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
+ """We patch `constr` and friends with wrappers that enforce strict=True."""
+
+ @functools.wraps(factory)
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ if "strict" not in kwargs:
+ raise MissingStrictInConstrainedTypeException(factory.__name__)
+ if not kwargs["strict"]:
+ raise MissingStrictInConstrainedTypeException(factory.__name__)
+ return factory(*args, **kwargs)
+
+ return wrapper
+
+
+def field_type_unwanted(type_: Any) -> bool:
+ """Very rough attempt to detect if a type is unwanted as a Pydantic annotation.
+
+ At present, we exclude types which will coerce, or any generic type involving types
+ which will coerce."""
+ logger.debug("Is %s unwanted?")
+ if type_ in TYPES_THAT_PYDANTIC_WILL_COERCE_TO:
+ logger.debug("yes")
+ return True
+ logger.debug("Maybe. Subargs are %s", get_args(type_))
+ rv = any(field_type_unwanted(t) for t in get_args(type_))
+ logger.debug("Conclusion: %s %s unwanted", type_, "is" if rv else "is not")
+ return rv
+
+
+class PatchedBaseModel(PydanticBaseModel):
+ """A patched version of BaseModel that inspects fields after models are defined.
+
+ We complain loudly if we see an unwanted type.
+
+ Beware: ModelField.type_ is presumably private; this is likely to be very brittle.
+ """
+
+ @classmethod
+ def __init_subclass__(cls: Type[PydanticBaseModel], **kwargs: object):
+ for field in cls.__fields__.values():
+ # Note that field.type_ and field.outer_type are computed based on the
+ # annotation type, see pydantic.fields.ModelField._type_analysis
+ if field_type_unwanted(field.outer_type_):
+ # TODO: this only reports the first bad field. Can we find all bad ones
+ # and report them all?
+ raise FieldHasUnwantedTypeException(
+ f"{cls.__module__}.{cls.__qualname__} has field '{field.name}' "
+ f"with unwanted type `{field.outer_type_}`"
+ )
+
+
+@contextmanager
+def monkeypatch_pydantic() -> Generator[None, None, None]:
+ """Patch pydantic with our snooping versions of BaseModel and the con* functions.
+
+ If the snooping functions see something they don't like, they'll raise a
+    ModelCheckerException instance.
+ """
+ with contextlib.ExitStack() as patches:
+ # Most Synapse code ought to import the patched objects directly from
+ # `pydantic`. But we also patch their containing modules `pydantic.main` and
+ # `pydantic.types` for completeness.
+ patch_basemodel1 = unittest.mock.patch(
+ "pydantic.BaseModel", new=PatchedBaseModel
+ )
+ patch_basemodel2 = unittest.mock.patch(
+ "pydantic.main.BaseModel", new=PatchedBaseModel
+ )
+ patches.enter_context(patch_basemodel1)
+ patches.enter_context(patch_basemodel2)
+ for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG:
+ wrapper: Callable = make_wrapper(factory)
+ patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper)
+ patch2 = unittest.mock.patch(
+ f"pydantic.types.{factory.__name__}", new=wrapper
+ )
+ patches.enter_context(patch1)
+ patches.enter_context(patch2)
+ yield
+
+
+def format_model_checker_exception(e: ModelCheckerException) -> str:
+ """Work out which line of code caused e. Format the line in a human-friendly way."""
+ # TODO. FieldHasUnwantedTypeException gives better error messages. Can we ditch the
+ # patches of constr() etc, and instead inspect fields to look for ConstrainedStr
+ # with strict=False? There is some difficulty with the inheritance hierarchy
+ # because StrictStr < ConstrainedStr < str.
+ if isinstance(e, FieldHasUnwantedTypeException):
+ return e.message
+ elif isinstance(e, MissingStrictInConstrainedTypeException):
+ frame_summary = traceback.extract_tb(e.__traceback__)[-2]
+ return (
+ f"Missing `strict=True` from {e.factory_name}() call \n"
+ + traceback.format_list([frame_summary])[0].lstrip()
+ )
+ else:
+ raise ValueError(f"Unknown exception {e}") from e
+
+
+def lint() -> int:
+ """Try to import all of Synapse and see if we spot any Pydantic type coercions.
+
+ Print any problems, then return a status code suitable for sys.exit."""
+ failures = do_lint()
+ if failures:
+ print(f"Found {len(failures)} problem(s)")
+ for failure in sorted(failures):
+ print(failure)
+ return os.EX_DATAERR if failures else os.EX_OK
+
+
+def do_lint() -> Set[str]:
+ """Try to import all of Synapse and see if we spot any Pydantic type coercions."""
+ failures = set()
+
+ with monkeypatch_pydantic():
+ logger.debug("Importing synapse")
+ try:
+ # TODO: make "synapse" an argument so we can target this script at
+ # a subpackage
+ module = importlib.import_module("synapse")
+ except ModelCheckerException as e:
+ logger.warning("Bad annotation found when importing synapse")
+ failures.add(format_model_checker_exception(e))
+ return failures
+
+ try:
+ logger.debug("Fetching subpackages")
+ module_infos = list(
+ pkgutil.walk_packages(module.__path__, f"{module.__name__}.")
+ )
+ except ModelCheckerException as e:
+ logger.warning("Bad annotation found when looking for modules to import")
+ failures.add(format_model_checker_exception(e))
+ return failures
+
+ for module_info in module_infos:
+ logger.debug("Importing %s", module_info.name)
+ try:
+ importlib.import_module(module_info.name)
+ except ModelCheckerException as e:
+ logger.warning(
+ f"Bad annotation found when importing {module_info.name}"
+ )
+ failures.add(format_model_checker_exception(e))
+
+ return failures
+
+
+def run_test_snippet(source: str) -> None:
+ """Exec a snippet of source code in an isolated environment."""
+ # To emulate `source` being called at the top level of the module,
+ # the globals and locals we provide apparently have to be the same mapping.
+ #
+ # > Remember that at the module level, globals and locals are the same dictionary.
+ # > If exec gets two separate objects as globals and locals, the code will be
+ # > executed as if it were embedded in a class definition.
+ globals_: Dict[str, object]
+ locals_: Dict[str, object]
+ globals_ = locals_ = {}
+ exec(textwrap.dedent(source), globals_, locals_)
+
+
+class TestConstrainedTypesPatch(unittest.TestCase):
+ def test_expression_without_strict_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import constr
+ constr()
+ """
+ )
+
+ def test_called_as_module_attribute_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ import pydantic
+ pydantic.constr()
+ """
+ )
+
+ def test_wildcard_import_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import *
+ constr()
+ """
+ )
+
+ def test_alternative_import_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic.types import constr
+ constr()
+ """
+ )
+
+ def test_alternative_import_attribute_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ import pydantic.types
+ pydantic.types.constr()
+ """
+ )
+
+ def test_kwarg_but_no_strict_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import constr
+ constr(min_length=10)
+ """
+ )
+
+ def test_kwarg_strict_False_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import constr
+ constr(strict=False)
+ """
+ )
+
+ def test_kwarg_strict_True_doesnt_raise(self) -> None:
+ with monkeypatch_pydantic():
+ run_test_snippet(
+ """
+ from pydantic import constr
+ constr(strict=True)
+ """
+ )
+
+ def test_annotation_without_strict_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import constr
+ x: constr()
+ """
+ )
+
+ def test_field_annotation_without_strict_raises(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic import BaseModel, conint
+ class C:
+ x: conint()
+ """
+ )
+
+
+class TestFieldTypeInspection(unittest.TestCase):
+ @parameterized.expand(
+ [
+ ("str",),
+ ("bytes"),
+ ("int",),
+ ("float",),
+ ("bool"),
+ ("Optional[str]",),
+ ("Union[None, str]",),
+ ("List[str]",),
+ ("List[List[str]]",),
+ ("Dict[StrictStr, str]",),
+ ("Dict[str, StrictStr]",),
+ ("TypedDict('D', x=int)",),
+ ]
+ )
+ def test_field_holding_unwanted_type_raises(self, annotation: str) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ f"""
+ from typing import *
+ from pydantic import *
+ class C(BaseModel):
+ f: {annotation}
+ """
+ )
+
+ @parameterized.expand(
+ [
+ ("StrictStr",),
+ ("StrictBytes"),
+ ("StrictInt",),
+ ("StrictFloat",),
+ ("StrictBool"),
+ ("constr(strict=True, min_length=10)",),
+ ("Optional[StrictStr]",),
+ ("Union[None, StrictStr]",),
+ ("List[StrictStr]",),
+ ("List[List[StrictStr]]",),
+ ("Dict[StrictStr, StrictStr]",),
+ ("TypedDict('D', x=StrictInt)",),
+ ]
+ )
+ def test_field_holding_accepted_type_doesnt_raise(self, annotation: str) -> None:
+ with monkeypatch_pydantic():
+ run_test_snippet(
+ f"""
+ from typing import *
+ from pydantic import *
+ class C(BaseModel):
+ f: {annotation}
+ """
+ )
+
+ def test_field_holding_str_raises_with_alternative_import(self) -> None:
+ with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):
+ run_test_snippet(
+ """
+ from pydantic.main import BaseModel
+ class C(BaseModel):
+ f: str
+ """
+ )
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("mode", choices=["lint", "test"], default="lint", nargs="?")
+parser.add_argument("-v", "--verbose", action="store_true")
+
+
+if __name__ == "__main__":
+ args = parser.parse_args(sys.argv[1:])
+ logging.basicConfig(
+ format="%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s",
+ level=logging.DEBUG if args.verbose else logging.INFO,
+ )
+ # suppress logs we don't care about
+ logging.getLogger("xmlschema").setLevel(logging.WARNING)
+ if args.mode == "lint":
+ sys.exit(lint())
+ elif args.mode == "test":
+ unittest.main(argv=sys.argv[:1])
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index eab23f18f1..803c6ce92d 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse
extra_test_args=()
-test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
+test_tags="synapse_blacklist,msc3787,msc3874"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +139,9 @@ if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
+ # Pass through the workers defined. If none, it will be an empty string
+ export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
+
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
@@ -158,7 +161,10 @@ else
# We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with.
- test_tags="$test_tags,faster_joins"
+ #
+ # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
+ # also only pass with monoliths, currently.
+ test_tags="$test_tags,faster_joins,msc2716,msc3030"
fi
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 763dd02c47..b1d5e2e616 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -46,11 +46,12 @@ import signedjson.key
import signedjson.types
import srvlookup
import yaml
+from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool
# uncomment the following to enable debug logging of http requests
-# from httplib import HTTPConnection
+# from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1
@@ -103,6 +104,7 @@ def request(
destination: str,
path: str,
content: Optional[str],
+ verify_tls: bool,
) -> requests.Response:
if method is None:
if content is None:
@@ -141,7 +143,6 @@ def request(
s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
- "Host": destination,
"Authorization": authorization_headers[0],
}
@@ -152,7 +153,7 @@ def request(
method=method,
url=dest,
headers=headers,
- verify=False,
+ verify=verify_tls,
data=content,
stream=True,
)
@@ -203,6 +204,12 @@ def main() -> None:
parser.add_argument("--body", help="Data to send as the body of the HTTP request")
parser.add_argument(
+ "--insecure",
+ action="store_true",
+ help="Disable TLS certificate verification",
+ )
+
+ parser.add_argument(
"path", help="request path, including the '/_matrix/federation/...' prefix."
)
@@ -227,6 +234,7 @@ def main() -> None:
args.destination,
args.path,
content=args.body,
+ verify_tls=not args.insecure,
)
sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:
class MatrixConnectionAdapter(HTTPAdapter):
+ def send(
+ self,
+ request: PreparedRequest,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Response:
+ # overrides the send() method in the base class.
+
+ # We need to look for .well-known redirects before passing the request up to
+ # HTTPAdapter.send().
+ assert isinstance(request.url, str)
+ parsed = urlparse.urlsplit(request.url)
+ server_name = parsed.netloc
+ well_known = self._get_well_known(parsed.netloc)
+
+ if well_known:
+ server_name = well_known
+
+ # replace the scheme in the uri with https, so that cert verification is done
+ # also replace the hostname if we got a .well-known result
+ request.url = urlparse.urlunsplit(
+ ("https", server_name, parsed.path, parsed.query, parsed.fragment)
+ )
+
+ # at this point we also add the host header (otherwise urllib will add one
+ # based on the `host` from the connection returned by `get_connection`,
+ # which will be wrong if there is an SRV record).
+ request.headers["Host"] = server_name
+
+ return super().send(request, *args, **kwargs)
+
+ def get_connection(
+ self, url: str, proxies: Optional[Dict[str, str]] = None
+ ) -> HTTPConnectionPool:
+ # overrides the get_connection() method in the base class
+ parsed = urlparse.urlsplit(url)
+ (host, port, ssl_server_name) = self._lookup(parsed.netloc)
+ print(
+ f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
+ )
+ return self.poolmanager.connection_from_host(
+ host,
+ port=port,
+ scheme="https",
+ pool_kwargs={"server_hostname": ssl_server_name},
+ )
+
@staticmethod
- def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
- if s[-1] == "]":
+ def _lookup(server_name: str) -> Tuple[str, int, str]:
+ """
+        Given the server_name (after any .well-known lookup), do an SRV lookup and
+        return the host and port to connect to, along with the TLS server name to
+        use for SNI.
+ """
+ if server_name[-1] == "]":
# ipv6 literal (with no port)
- return s, 8448
+ return server_name, 8448, server_name
- if ":" in s:
- out = s.rsplit(":", 1)
+ if ":" in server_name:
+ # explicit port
+ out = server_name.rsplit(":", 1)
try:
port = int(out[1])
except ValueError:
- raise ValueError("Invalid host:port '%s'" % s)
- return out[0], port
-
- # try a .well-known lookup
- if not skip_well_known:
- well_known = MatrixConnectionAdapter.get_well_known(s)
- if well_known:
- return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
+ raise ValueError("Invalid host:port '%s'" % (server_name,))
+ return out[0], port, out[0]
try:
- srv = srvlookup.lookup("matrix", "tcp", s)[0]
- return srv.host, srv.port
+ srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
+ print(
+ f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
+ file=sys.stderr,
+ )
+ return srv.host, srv.port, server_name
except Exception:
- return s, 8448
+ return server_name, 8448, server_name
@staticmethod
- def get_well_known(server_name: str) -> Optional[str]:
- uri = "https://%s/.well-known/matrix/server" % (server_name,)
- print("fetching %s" % (uri,), file=sys.stderr)
+ def _get_well_known(server_name: str) -> Optional[str]:
+ if ":" in server_name:
+ # explicit port, or ipv6 literal. Either way, no .well-known
+ return None
+
+ # TODO: check for ipv4 literals
+
+ uri = f"https://{server_name}/.well-known/matrix/server"
+ print(f"fetching {uri}", file=sys.stderr)
try:
resp = requests.get(uri)
@@ -304,19 +369,6 @@ class MatrixConnectionAdapter(HTTPAdapter):
print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
return None
- def get_connection(
- self, url: str, proxies: Optional[Dict[str, str]] = None
- ) -> HTTPConnectionPool:
- parsed = urlparse.urlparse(url)
-
- (host, port) = self.lookup(parsed.netloc)
- netloc = "%s:%d" % (host, port)
- print("Connecting to %s" % (netloc,), file=sys.stderr)
- url = urlparse.urlunparse(
- ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
- )
- return super().get_connection(url, proxies)
-
if __name__ == "__main__":
main()
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 377348b107..bf900645b1 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -106,4 +106,5 @@ isort "${files[@]}"
python3 -m black "${files[@]}"
./scripts-dev/config-lint.sh
flake8 "${files[@]}"
+./scripts-dev/check_pydantic_models.py lint
mypy
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
index f0e22d4ca2..e2bc1640bb 100755
--- a/scripts-dev/make_full_schema.sh
+++ b/scripts-dev/make_full_schema.sh
@@ -2,34 +2,37 @@
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
-#
-# It does so by having Synapse generate an up-to-date SQLite DB, then running
-# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
export PGHOST="localhost"
-POSTGRES_DB_NAME="synapse_full_schema.$$"
-
-SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite"
-POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres"
-
+POSTGRES_MAIN_DB_NAME="synapse_full_schema_main.$$"
+POSTGRES_COMMON_DB_NAME="synapse_full_schema_common.$$"
+POSTGRES_STATE_DB_NAME="synapse_full_schema_state.$$"
REQUIRED_DEPS=("matrix-synapse" "psycopg2")
usage() {
echo
- echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
+ echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n <schema number>] [-h]"
echo
echo "-p <postgres_username>"
echo " Username to connect to local postgres instance. The password will be requested"
echo " during script execution."
echo "-c"
- echo " CI mode. Enables coverage tracking and prints every command that the script runs."
+ echo " CI mode. Prints every command that the script runs."
echo "-o <path>"
echo " Directory to output full schema files to."
+ echo "-n <schema number>"
+ echo " Schema number for the new snapshot. Used to set the location of files within "
+ echo " the output directory, mimicking that of synapse/storage/schemas."
+ echo " Defaults to 9999."
echo "-h"
echo " Display this help text."
+ echo ""
+ echo " NB: make sure to run this against the *oldest* supported version of postgres,"
+ echo " or else pg_dump might output non-backwards-compatible syntax."
}
-while getopts "p:co:h" opt; do
+SCHEMA_NUMBER="9999"
+while getopts "p:co:hn:" opt; do
case $opt in
p)
export PGUSER=$OPTARG
@@ -37,11 +40,6 @@ while getopts "p:co:h" opt; do
c)
# Print all commands that are being executed
set -x
-
- # Modify required dependencies for coverage
- REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess")
-
- COVERAGE=1
;;
o)
command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1)
@@ -51,6 +49,9 @@ while getopts "p:co:h" opt; do
usage
exit
;;
+ n)
+ SCHEMA_NUMBER="$OPTARG"
+ ;;
\?)
echo "ERROR: Invalid option: -$OPTARG" >&2
usage
@@ -98,11 +99,21 @@ cd "$(dirname "$0")/.."
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
-SQLITE_DB=$TMPDIR/homeserver.db
+SQLITE_MAIN_DB=$TMPDIR/main.db
+SQLITE_STATE_DB=$TMPDIR/state.db
+SQLITE_COMMON_DB=$TMPDIR/common.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf
# Ensure these files are delete on script exit
-trap 'rm -rf $TMPDIR' EXIT
+cleanup() {
+ echo "Cleaning up temporary sqlite database and config files..."
+ rm -r "$TMPDIR"
+ echo "Cleaning up temporary Postgres database..."
+ dropdb --if-exists "$POSTGRES_COMMON_DB_NAME"
+ dropdb --if-exists "$POSTGRES_MAIN_DB_NAME"
+ dropdb --if-exists "$POSTGRES_STATE_DB_NAME"
+}
+trap 'cleanup' EXIT
cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"
@@ -112,10 +123,22 @@ macaroon_secret_key: "abcde"
report_stats: false
-database:
- name: "sqlite3"
- args:
- database: "$SQLITE_DB"
+databases:
+ common:
+ name: "sqlite3"
+ data_stores: []
+ args:
+ database: "$SQLITE_COMMON_DB"
+ main:
+ name: "sqlite3"
+ data_stores: ["main"]
+ args:
+ database: "$SQLITE_MAIN_DB"
+ state:
+ name: "sqlite3"
+ data_stores: ["state"]
+ args:
+ database: "$SQLITE_STATE_DB"
# Suppress the key server warning.
trusted_key_servers: []
@@ -129,13 +152,32 @@ macaroon_secret_key: "abcde"
report_stats: false
-database:
- name: "psycopg2"
- args:
- user: "$PGUSER"
- host: "$PGHOST"
- password: "$PGPASSWORD"
- database: "$POSTGRES_DB_NAME"
+databases:
+ common:
+ name: "psycopg2"
+ data_stores: []
+ args:
+ user: "$PGUSER"
+ host: "$PGHOST"
+ password: "$PGPASSWORD"
+ database: "$POSTGRES_COMMON_DB_NAME"
+ main:
+ name: "psycopg2"
+ data_stores: ["main"]
+ args:
+ user: "$PGUSER"
+ host: "$PGHOST"
+ password: "$PGPASSWORD"
+ database: "$POSTGRES_MAIN_DB_NAME"
+ state:
+ name: "psycopg2"
+ data_stores: ["state"]
+ args:
+ user: "$PGUSER"
+ host: "$PGHOST"
+ password: "$PGPASSWORD"
+ database: "$POSTGRES_STATE_DB_NAME"
+
# Suppress the key server warning.
trusted_key_servers: []
@@ -147,29 +189,46 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
-synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG"
+synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates
# Create the PostgreSQL database.
-echo "Creating postgres database..."
-createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
-
-echo "Copying data from SQLite3 to Postgres with synapse_port_db..."
-if [ -z "$COVERAGE" ]; then
- # No coverage needed
- synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
-else
- # Coverage desired
- coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG"
-fi
+echo "Creating postgres databases..."
+createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_COMMON_DB_NAME"
+createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAME"
+createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME"
+
+echo "Running db background jobs..."
+synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates
+
-# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
-# Also delete any shadow tables from fts4
-# This needs to be done after synapse_port_db is run
echo "Dropping unwanted db tables..."
-SQL="
+
+# Some common tables are created and updated by Synapse itself and do not belong in the
+# schema.
+DROP_APP_MANAGED_TABLES="
DROP TABLE schema_version;
+DROP TABLE schema_compat_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
+"
+# Other common tables are not created by Synapse and do belong in the schema.
+# TODO: we could derive DROP_COMMON_TABLES from the dump of the common-only DB. But
+# since there's only one table there, I haven't bothered to do so.
+DROP_COMMON_TABLES="$DROP_APP_MANAGED_TABLES
+DROP TABLE background_updates;
+"
+
+sqlite3 "$SQLITE_COMMON_DB" <<< "$DROP_APP_MANAGED_TABLES"
+sqlite3 "$SQLITE_MAIN_DB" <<< "$DROP_COMMON_TABLES"
+sqlite3 "$SQLITE_STATE_DB" <<< "$DROP_COMMON_TABLES"
+psql "$POSTGRES_COMMON_DB_NAME" -w <<< "$DROP_APP_MANAGED_TABLES"
+psql "$POSTGRES_MAIN_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
+psql "$POSTGRES_STATE_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
+
+# For Reasons(TM), SQLite's `.schema` also dumps out "shadow tables", the implementation
+# details behind full text search tables. Omit these from the dumps.
+
+sqlite3 "$SQLITE_MAIN_DB" <<< "
DROP TABLE event_search_content;
DROP TABLE event_search_segments;
DROP TABLE event_search_segdir;
@@ -181,16 +240,57 @@ DROP TABLE user_directory_search_segdir;
DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"
-sqlite3 "$SQLITE_DB" <<< "$SQL"
-psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
-echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
-sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
+echo "Dumping SQLite3 schema..."
+
+mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schemas/$SCHEMA_NUMBER"
+sqlite3 "$SQLITE_COMMON_DB" ".schema" > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_MAIN_DB" ".schema" > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_STATE_DB" ".schema" > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+
+cleanup_pg_schema() {
+ # Cleanup as follows:
+ # - Remove empty lines. pg_dump likes to output a lot of these.
+ # - Remove comment-only lines. pg_dump also likes to output a lot of these to visually
+ # separate tables etc.
+ # - Remove "public." prefix --- the schema name.
+ # - Remove "SET" commands. Last time I ran this, the output commands were
+ # SET statement_timeout = 0;
+ # SET lock_timeout = 0;
+ # SET idle_in_transaction_session_timeout = 0;
+ # SET client_encoding = 'UTF8';
+ # SET standard_conforming_strings = on;
+ # SET check_function_bodies = false;
+ # SET xmloption = content;
+ # SET client_min_messages = warning;
+ # SET row_security = off;
+ # SET default_table_access_method = heap;
+ # - Very carefully remove specific SELECT statements. We CANNOT blanket remove all
+ # SELECT statements because some of those have side-effects which we do want in the
+ # schema. Last time I ran this, the only SELECTS were
+ # SELECT pg_catalog.set_config('search_path', '', false);
+ # and
+ # SELECT pg_catalog.setval(text, bigint, bool);
+ # We do want to remove the former, but the latter is important. If the last argument
+ # is `true` or omitted, this marks the given integer as having been consumed and
+ # will NOT appear as the nextval.
+ sed -e '/^$/d' \
+ -e '/^--/d' \
+ -e 's/public\.//g' \
+ -e '/^SET /d' \
+ -e '/^SELECT pg_catalog.set_config/d'
+}
-echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..."
-pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE"
+echo "Dumping Postgres schema..."
-echo "Cleaning up temporary Postgres database..."
-dropdb $POSTGRES_DB_NAME
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
echo "Done! Files dumped to: $OUTPUT_DIR"
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index d08517a953..2c377533c0 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -29,7 +29,7 @@ class SynapsePlugin(Plugin):
self, fullname: str
) -> Optional[Callable[[MethodSigContext], CallableType]]:
if fullname.startswith(
- "synapse.util.caches.descriptors._CachedFunction.__call__"
+ "synapse.util.caches.descriptors.CachedFunction.__call__"
) or fullname.startswith(
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
):
@@ -38,7 +38,7 @@ class SynapsePlugin(Plugin):
def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
- """Fixes the `_CachedFunction.__call__` signature to be correct.
+ """Fixes the `CachedFunction.__call__` signature to be correct.
It already has *almost* the correct signature, except:
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 46220c4dd3..bf47b6c713 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -18,10 +18,12 @@
"""
import glob
+import json
import os
import re
import subprocess
import sys
+import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
@@ -71,18 +73,21 @@ def cli() -> None:
./scripts-dev/release.py tag
- # ... wait for assets to build ...
+ # wait for assets to build, either manually or with:
+ ./scripts-dev/release.py wait-for-actions
./scripts-dev/release.py publish
./scripts-dev/release.py upload
- # Optional: generate some nice links for the announcement
-
./scripts-dev/release.py merge-back
+ # Optional: generate some nice links for the announcement
./scripts-dev/release.py announce
+    Alternatively, `./scripts-dev/release.py full` will do all of the above,
+    as well as guide you through the manual steps.
+
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published.
"""
@@ -90,6 +95,10 @@ def cli() -> None:
@cli.command()
def prepare() -> None:
+ _prepare()
+
+
+def _prepare() -> None:
"""Do the initial stages of creating a release, including creating release
branch, updating changelog and pushing to GitHub.
"""
@@ -210,9 +219,7 @@ def prepare() -> None:
update_branch(repo)
# Create the new release branch
- # Type ignore will no longer be needed after GitPython 3.1.28.
- # See https://github.com/gitpython-developers/GitPython/pull/1419
- repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
+ repo.create_head(release_branch_name, commit=base_branch)
# Special-case SyTest: we don't actually prepare any files so we may
# as well push it now (and only when we create a release branch;
@@ -284,6 +291,10 @@ def prepare() -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
def tag(gh_token: Optional[str]) -> None:
+ _tag(gh_token)
+
+
+def _tag(gh_token: Optional[str]) -> None:
"""Tags the release and generates a draft GitHub release"""
# Make sure we're in a git repo.
@@ -374,6 +385,10 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
+ _publish(gh_token)
+
+
+def _publish(gh_token: str) -> None:
"""Publish release on GitHub."""
# Make sure we're in a git repo.
@@ -410,7 +425,12 @@ def publish(gh_token: str) -> None:
@cli.command()
-def upload() -> None:
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
+def upload(gh_token: Optional[str]) -> None:
+ _upload(gh_token)
+
+
+def _upload(gh_token: Optional[str]) -> None:
"""Upload release to pypi."""
current_version = get_package_version()
@@ -423,18 +443,40 @@ def upload() -> None:
click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
click.get_current_context().abort()
- pypi_asset_names = [
- f"matrix_synapse-{current_version}-py3-none-any.whl",
- f"matrix-synapse-{current_version}.tar.gz",
- ]
+ # Query all the assets corresponding to this release.
+ gh = Github(gh_token)
+ gh_repo = gh.get_repo("matrix-org/synapse")
+ gh_release = gh_repo.get_release(tag_name)
+
+ all_assets = set(gh_release.get_assets())
+
+ # Only accept the wheels and sdist.
+ # Notably: we don't care about debs.tar.xz.
+ asset_names_and_urls = sorted(
+ (asset.name, asset.browser_download_url)
+ for asset in all_assets
+ if asset.name.endswith((".whl", ".tar.gz"))
+ )
+
+ # Print out what we've determined.
+ print("Found relevant assets:")
+ for asset_name, _ in asset_names_and_urls:
+ print(f" - {asset_name}")
+
+ ignored_asset_names = sorted(
+ {asset.name for asset in all_assets}
+ - {asset_name for asset_name, _ in asset_names_and_urls}
+ )
+ print("\nIgnoring irrelevant assets:")
+ for asset_name in ignored_asset_names:
+ print(f" - {asset_name}")
with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir:
- for name in pypi_asset_names:
+ for name, asset_download_url in asset_names_and_urls:
filename = path.join(tmpdir, name)
- url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}"
click.echo(f"Downloading {name} into {filename}")
- urllib.request.urlretrieve(url, filename=filename)
+ urllib.request.urlretrieve(asset_download_url, filename=filename)
if click.confirm("Upload to PyPI?", default=True):
subprocess.run("twine upload *", shell=True, cwd=tmpdir)
@@ -480,7 +522,74 @@ def _merge_into(repo: Repo, source: str, target: str) -> None:
@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
+def wait_for_actions(gh_token: Optional[str]) -> None:
+ _wait_for_actions(gh_token)
+
+
+def _wait_for_actions(gh_token: Optional[str]) -> None:
+ # Find out the version and tag name.
+ current_version = get_package_version()
+ tag_name = f"v{current_version}"
+
+ # Authentication is optional on this endpoint,
+ # but use a token if we have one to reduce the chance of being rate-limited.
+ url = f"https://api.github.com/repos/matrix-org/synapse/actions/runs?branch={tag_name}"
+ headers = {"Accept": "application/vnd.github+json"}
+ if gh_token is not None:
+ headers["authorization"] = f"token {gh_token}"
+ req = urllib.request.Request(url, headers=headers)
+
+ time.sleep(10 * 60)
+ while True:
+ time.sleep(5 * 60)
+ response = urllib.request.urlopen(req)
+ resp = json.loads(response.read())
+
+ if len(resp["workflow_runs"]) == 0:
+ continue
+
+ if all(
+ workflow["status"] != "in_progress" for workflow in resp["workflow_runs"]
+ ):
+            success = all(
+                workflow["status"] == "completed" for workflow in resp["workflow_runs"]
+            )
+ if success:
+ _notify("Workflows successful. You can now continue the release.")
+ else:
+ _notify("Workflows failed.")
+ click.confirm("Continue anyway?", abort=True)
+
+ break
+
+
+def _notify(message: str) -> None:
+ # Send a bell character. Most terminals will play a sound or show a notification
+ # for this.
+ click.echo(f"\a{message}")
+
+ # Try and run notify-send, but don't raise an Exception if this fails
+ # (This is best-effort)
+ # TODO Support other platforms?
+ subprocess.run(
+ [
+ "notify-send",
+ "--app-name",
+ "Synapse Release Script",
+ "--expire-time",
+ "3600000",
+ message,
+ ]
+ )
+
+
+@cli.command()
def merge_back() -> None:
+ _merge_back()
+
+
+def _merge_back() -> None:
"""Merge the release branch back into the appropriate branches.
All branches will be automatically pulled from the remote and the results
will be pushed to the remote."""
@@ -519,6 +628,10 @@ def merge_back() -> None:
@cli.command()
def announce() -> None:
+ _announce()
+
+
+def _announce() -> None:
"""Generate markdown to announce the release."""
current_version = get_package_version()
@@ -548,10 +661,56 @@ Announce the release in
- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
- #synapse-dev:matrix.org
-- #synapse-package-maintainers:matrix.org"""
+- #synapse-package-maintainers:matrix.org
+
+Ask the designated people to do the blog and tweets."""
)
+@cli.command()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
+def full(gh_token: str) -> None:
+ click.echo("1. If this is a security release, read the security wiki page.")
+ click.echo("2. Check for any release blockers before proceeding.")
+ click.echo(" https://github.com/matrix-org/synapse/labels/X-Release-Blocker")
+
+ click.confirm("Ready?", abort=True)
+
+ click.echo("\n*** prepare ***")
+ _prepare()
+
+ click.echo("Deploy to matrix.org and ensure that it hasn't fallen over.")
+ click.echo("Remember to silence the alerts to prevent alert spam.")
+ click.confirm("Deployed?", abort=True)
+
+ click.echo("\n*** tag ***")
+ _tag(gh_token)
+
+ click.echo("\n*** wait for actions ***")
+ _wait_for_actions(gh_token)
+
+ click.echo("\n*** publish ***")
+ _publish(gh_token)
+
+ click.echo("\n*** upload ***")
+ _upload(gh_token)
+
+ click.echo("\n*** merge back ***")
+ _merge_back()
+
+ click.echo("\nUpdate the Debian repository")
+ click.confirm("Started updating Debian repository?", abort=True)
+
+ click.echo("\nWait for all release methods to be ready.")
+ # Docker should be ready because it was done by the workflows earlier
+ # PyPI should be ready because we just ran upload().
+ # TODO Automatically poll until the Debs have made it to packages.matrix.org
+ click.confirm("Debs ready?", abort=True)
+
+ click.echo("\n*** announce ***")
+ _announce()
+
+
def get_package_version() -> version.Version:
version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
"utf-8"
diff --git a/stubs/synapse/__init__.pyi b/stubs/synapse/__init__.pyi
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/stubs/synapse/__init__.pyi
diff --git a/stubs/synapse/synapse_rust/__init__.pyi b/stubs/synapse/synapse_rust/__init__.pyi
new file mode 100644
index 0000000000..8658d3138f
--- /dev/null
+++ b/stubs/synapse/synapse_rust/__init__.pyi
@@ -0,0 +1,2 @@
+def sum_as_string(a: int, b: int) -> str: ...
+def get_rust_file_digest() -> str: ...
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
new file mode 100644
index 0000000000..ceade65ef9
--- /dev/null
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -0,0 +1,50 @@
+from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
+
+from synapse.types import JsonDict
+
+class PushRule:
+ @property
+ def rule_id(self) -> str: ...
+ @property
+ def priority_class(self) -> int: ...
+ @property
+ def conditions(self) -> Sequence[Mapping[str, str]]: ...
+ @property
+ def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ...
+ @property
+ def default(self) -> bool: ...
+ @property
+ def default_enabled(self) -> bool: ...
+ @staticmethod
+ def from_db(
+ rule_id: str, priority_class: int, conditions: str, actions: str
+ ) -> "PushRule": ...
+
+class PushRules:
+ def __init__(self, rules: Collection[PushRule]): ...
+ def rules(self) -> Collection[PushRule]: ...
+
+class FilteredPushRules:
+ def __init__(
+ self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
+ ): ...
+ def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
+
+def get_base_rule_ids() -> Collection[str]: ...
+
+class PushRuleEvaluator:
+ def __init__(
+ self,
+ flattened_keys: Mapping[str, str],
+ room_member_count: int,
+ sender_power_level: Optional[int],
+ notification_power_levels: Mapping[str, int],
+ related_events_flattened: Mapping[str, Mapping[str, str]],
+ related_event_match_enabled: bool,
+ ): ...
+ def run(
+ self,
+ push_rules: FilteredPushRules,
+ user_id: Optional[str],
+ display_name: Optional[str],
+ ) -> Collection[Union[Mapping, str]]: ...
diff --git a/synapse/__init__.py b/synapse/__init__.py
index b1369aca8f..fbfd506a43 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -20,31 +20,31 @@ import json
import os
import sys
+from synapse.util.rust import check_rust_lib_up_to_date
+from synapse.util.stringutils import strtobool
+
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.
-if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
- try:
- from incremental import Version
+if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")):
+ from incremental import Version
- import twisted
+ import twisted
- # We need a bugfix that is included in Twisted 21.2.0:
- # https://twistedmatrix.com/trac/ticket/9787
- if twisted.version < Version("Twisted", 21, 2, 0):
- print("Using asyncio reactor requires Twisted>=21.2.0")
- sys.exit(1)
+ # We need a bugfix that is included in Twisted 21.2.0:
+ # https://twistedmatrix.com/trac/ticket/9787
+ if twisted.version < Version("Twisted", 21, 2, 0):
+ print("Using asyncio reactor requires Twisted>=21.2.0")
+ sys.exit(1)
- import asyncio
+ import asyncio
- from twisted.internet import asyncioreactor
+ from twisted.internet import asyncioreactor
- asyncioreactor.install(asyncio.get_event_loop())
- except ImportError:
- pass
+ asyncioreactor.install(asyncio.get_event_loop())
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
@@ -78,3 +78,6 @@ if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
from synapse.util.patch_inline_callbacks import do_patch
do_patch()
+
+
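+# Verify that the compiled Rust library is up to date.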
+check_rust_lib_up_to_date()
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 092601f530..0c4504d5d8 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -1,6 +1,6 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector
-# Copyright 2021 The Matrix.org Foundation C.I.C.
+# Copyright 2021-22 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,11 +20,22 @@ import hashlib
import hmac
import logging
import sys
-from typing import Callable, Optional
+from typing import Any, Callable, Dict, Optional
import requests
import yaml
+_CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
+Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path'
+are both defined in config file.
+"""
+
+_NO_SHARED_SECRET_OPTS_ERROR = """\
+No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
+"""
+
+_DEFAULT_SERVER_URL = "http://localhost:8008"
+
def request_registration(
user: str,
@@ -203,31 +214,104 @@ def main() -> None:
parser.add_argument(
"server_url",
- default="https://localhost:8448",
nargs="?",
- help="URL to use to talk to the homeserver. Defaults to "
- " 'https://localhost:8448'.",
+ help="URL to use to talk to the homeserver. By default, tries to find a "
+ "suitable URL from the configuration file. Otherwise, defaults to "
+ f"'{_DEFAULT_SERVER_URL}'.",
)
args = parser.parse_args()
if "config" in args and args.config:
config = yaml.safe_load(args.config)
- secret = config.get("registration_shared_secret", None)
+
+ if args.shared_secret:
+ secret = args.shared_secret
+ else:
+ # argparse should check that we have either config or shared secret
+ assert config
+
+ secret = config.get("registration_shared_secret")
+ secret_file = config.get("registration_shared_secret_path")
+ if secret_file:
+ if secret:
+ print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
+ sys.exit(1)
+ secret = _read_file(secret_file, "registration_shared_secret_path").strip()
if not secret:
- print("No 'registration_shared_secret' defined in config.")
+ print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)
+
+ if args.server_url:
+ server_url = args.server_url
+ elif config:
+ server_url = _find_client_listener(config)
+ if not server_url:
+ server_url = _DEFAULT_SERVER_URL
+ print(
+ "Unable to find a suitable HTTP listener in the configuration file. "
+ f"Trying {server_url} as a last resort.",
+ file=sys.stderr,
+ )
else:
- secret = args.shared_secret
+ server_url = _DEFAULT_SERVER_URL
+ print(
+ f"No server url or configuration file given. Defaulting to {server_url}.",
+ file=sys.stderr,
+ )
admin = None
if args.admin or args.no_admin:
admin = args.admin
register_new_user(
- args.user, args.password, args.server_url, secret, admin, args.user_type
+ args.user, args.password, server_url, secret, admin, args.user_type
)
+def _read_file(file_path: Any, config_path: str) -> str:
+ """Check the given file exists, and read it into a string
+
+ If it does not, exit with an error indicating the problem
+
+ Args:
+ file_path: the file to be read
+ config_path: where in the configuration file_path came from, so that a useful
+ error can be emitted if it does not exist.
+ Returns:
+ content of the file.
+ """
+ if not isinstance(file_path, str):
+ print(f"{config_path} setting is not a string", file=sys.stderr)
+ sys.exit(1)
+
+ try:
+ with open(file_path) as file_stream:
+ return file_stream.read()
+ except OSError as e:
+ print(f"Error accessing file {file_path}: {e}", file=sys.stderr)
+ sys.exit(1)
+
+
+def _find_client_listener(config: Dict[str, Any]) -> Optional[str]:
+    # Try to find a suitable client listener in the config. Returns a base URL such as
+    # "http://localhost:8008", or None if no suitable listener is found.
+ for listener in config.get("listeners", []):
+ if listener.get("type") != "http" or listener.get("tls", False):
+ continue
+
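+        # Only consider listeners which expose the client-server API (the "client"
+        # resource).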
+ if not any(
+ name == "client"
+ for resource in listener.get("resources", [])
+ for name in resource.get("names", [])
+ ):
+ continue
+
+ # TODO: consider bind_addresses
+ return f"http://localhost:{listener['port']}"
+
+ # no suitable listeners?
+ return None
+
+
if __name__ == "__main__":
main()
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 543bba27c2..d850e54e17 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -67,10 +67,12 @@ from synapse.storage.databases.main.media_repository import (
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
+from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
find_max_generated_user_id_localpart,
)
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
@@ -106,10 +108,11 @@ BOOLEAN_COLUMNS = {
"redactions": ["have_censored"],
"room_stats_state": ["is_federatable"],
"local_media_repository": ["safe_from_quarantine"],
- "users": ["shadow_banned"],
+ "users": ["shadow_banned", "approved"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
+ "pushers": ["enabled"],
}
@@ -203,6 +206,8 @@ class Store(
PushRuleStore,
PusherWorkerStore,
PresenceBackgroundUpdateStore,
+ ReceiptsBackgroundUpdateStore,
+ RelationsWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
index b4aeae6dd5..0adf94bba6 100755..100644
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -15,7 +15,6 @@
import argparse
import logging
-import sys
from typing import cast
import yaml
@@ -48,10 +47,13 @@ class MockHomeserver(HomeServer):
def run_background_updates(hs: HomeServer) -> None:
- store = hs.get_datastores().main
+ main = hs.get_datastores().main
+ state = hs.get_datastores().state
async def run_background_updates() -> None:
- await store.db_pool.updates.run_background_updates(sleep=False)
+ await main.db_pool.updates.run_background_updates(sleep=False)
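+        # The state store is optional, so only run its background updates if it exists.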
+ if state:
+ await state.db_pool.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()
@@ -97,10 +99,6 @@ def main() -> None:
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
- if "database" not in hs_config:
- sys.stderr.write("The configuration file must have a 'database' section.\n")
- sys.exit(4)
-
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 82e6475ef5..3d7f986ac7 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -31,9 +31,15 @@ from synapse.api.errors import (
from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import active_span, force_tracing, start_active_span
-from synapse.storage.databases.main.registration import TokenLookupResult
-from synapse.types import Requester, UserID, create_requester
+from synapse.logging.opentracing import (
+ SynapseTags,
+ active_span,
+ force_tracing,
+ start_active_span,
+ trace,
+)
+from synapse.types import Requester, create_requester
+from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -65,14 +71,14 @@ class Auth:
async def check_user_in_room(
self,
room_id: str,
- user_id: str,
+ requester: Requester,
allow_departed_users: bool = False,
) -> Tuple[str, Optional[str]]:
"""Check if the user is in the room, or was at some point.
Args:
room_id: The room to check.
- user_id: The user to check.
+ requester: The user making the request, according to the access token.
current_state: Optional map of the current state of the room.
If provided then that map is used to check whether they are a
@@ -89,6 +95,7 @@ class Auth:
membership event ID of the user.
"""
+ user_id = requester.user.to_string()
(
membership,
member_event_id,
@@ -113,6 +120,7 @@ class Auth:
errcode=Codes.NOT_JOINED,
)
+ @cancellable
async def get_user_by_req(
self,
request: SynapseRequest,
@@ -154,6 +162,12 @@ class Auth:
parent_span.set_tag(
"authenticated_entity", requester.authenticated_entity
)
+ # We tag the Synapse instance name so that it's an easy jumping
+ # off point into the logs. Can also be used to filter for an
+ # instance that is under load.
+ parent_span.set_tag(
+ SynapseTags.INSTANCE_NAME, self.hs.get_instance_name()
+ )
parent_span.set_tag("user_id", requester.user.to_string())
if requester.device_id is not None:
parent_span.set_tag("device_id", requester.device_id)
@@ -161,6 +175,7 @@ class Auth:
parent_span.set_tag("appservice_id", requester.app_service.id)
return requester
+ @cancellable
async def _wrapped_get_user_by_req(
self,
request: SynapseRequest,
@@ -177,96 +192,69 @@ class Auth:
access_token = self.get_access_token_from_request(request)
- (
- user_id,
- device_id,
- app_service,
- ) = await self._get_appservice_user_id_and_device_id(request)
- if user_id and app_service:
- if ip_addr and self._track_appservice_user_ips:
- await self.store.insert_client_ip(
- user_id=user_id,
- access_token=access_token,
- ip=ip_addr,
- user_agent=user_agent,
- device_id="dummy-device"
- if device_id is None
- else device_id, # stubbed
- )
-
- requester = create_requester(
- user_id, app_service=app_service, device_id=device_id
+ # First check if it could be a request from an appservice
+ requester = await self._get_appservice_user(request)
+ if not requester:
+ # If not, it should be from a regular user
+ requester = await self.get_user_by_access_token(
+ access_token, allow_expired=allow_expired
)
- request.requester = user_id
- return requester
-
- user_info = await self.get_user_by_access_token(
- access_token, allow_expired=allow_expired
- )
- token_id = user_info.token_id
- is_guest = user_info.is_guest
- shadow_banned = user_info.shadow_banned
-
- # Deny the request if the user account has expired.
- if not allow_expired:
- if await self._account_validity_handler.is_user_expired(
- user_info.user_id
- ):
- # Raise the error if either an account validity module has determined
- # the account has expired, or the legacy account validity
- # implementation is enabled and determined the account has expired
- raise AuthError(
- 403,
- "User account has expired",
- errcode=Codes.EXPIRED_ACCOUNT,
- )
-
- device_id = user_info.device_id
-
- if access_token and ip_addr:
+ # Deny the request if the user account has expired.
+ # This check is only done for regular users, not appservice ones.
+ if not allow_expired:
+ if await self._account_validity_handler.is_user_expired(
+ requester.user.to_string()
+ ):
+ # Raise the error if either an account validity module has determined
+ # the account has expired, or the legacy account validity
+ # implementation is enabled and determined the account has expired
+ raise AuthError(
+ 403,
+ "User account has expired",
+ errcode=Codes.EXPIRED_ACCOUNT,
+ )
+
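+            # Record the client IP: always for regular users, and for appservice users
+            # only when configured to track them.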
+ if ip_addr and (
+ not requester.app_service or self._track_appservice_user_ips
+ ):
+ # XXX(quenting): I'm 95% confident that we could skip setting the
+ # device_id to "dummy-device" for appservices, and that the only impact
+            # would be some rows which would not deduplicate in the 'user_ips'
+ # table during the transition
+ recorded_device_id = (
+ "dummy-device"
+ if requester.device_id is None and requester.app_service is not None
+ else requester.device_id
+ )
await self.store.insert_client_ip(
- user_id=user_info.token_owner,
+ user_id=requester.authenticated_entity,
access_token=access_token,
ip=ip_addr,
user_agent=user_agent,
- device_id=device_id,
+ device_id=recorded_device_id,
)
+
# Track also the puppeted user client IP if enabled and the user is puppeting
if (
- user_info.user_id != user_info.token_owner
+ requester.user.to_string() != requester.authenticated_entity
and self._track_puppeted_user_ips
):
await self.store.insert_client_ip(
- user_id=user_info.user_id,
+ user_id=requester.user.to_string(),
access_token=access_token,
ip=ip_addr,
user_agent=user_agent,
- device_id=device_id,
+ device_id=requester.device_id,
)
- if is_guest and not allow_guest:
+ if requester.is_guest and not allow_guest:
raise AuthError(
403,
"Guest access not allowed",
errcode=Codes.GUEST_ACCESS_FORBIDDEN,
)
- # Mark the token as used. This is used to invalidate old refresh
- # tokens after some time.
- if not user_info.token_used and token_id is not None:
- await self.store.mark_access_token_as_used(token_id)
-
- requester = create_requester(
- user_info.user_id,
- token_id,
- is_guest,
- shadow_banned,
- device_id,
- app_service=app_service,
- authenticated_entity=user_info.token_owner,
- )
-
request.requester = requester
return requester
except KeyError:
@@ -303,9 +291,8 @@ class Auth:
403, "Application service has not registered this user (%s)" % user_id
)
- async def _get_appservice_user_id_and_device_id(
- self, request: Request
- ) -> Tuple[Optional[str], Optional[str], Optional[ApplicationService]]:
+ @cancellable
+ async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
"""
Given a request, reads the request parameters to determine:
- whether it's an application service that's making this request
@@ -320,15 +307,13 @@ class Auth:
Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.
Returns:
- 3-tuple of
- (user ID?, device ID?, application service?)
+ the application service `Requester` of that request
Postconditions:
- - If an application service is returned, so is a user ID
- - A user ID is never returned without an application service
- - A device ID is never returned without a user ID or an application service
- - The returned application service, if present, is permitted to control the
- returned user ID.
+ - The `app_service` field in the returned `Requester` is set
+ - The `user_id` field in the returned `Requester` is either the application
+ service sender or the controlled user set by the `user_id` URI parameter
+ - The returned application service is permitted to control the returned user ID.
- The returned device ID, if present, has been checked to be a valid device ID
for the returned user ID.
"""
@@ -338,12 +323,12 @@ class Auth:
self.get_access_token_from_request(request)
)
if app_service is None:
- return None, None, None
+ return None
if app_service.ip_range_whitelist:
ip_address = IPAddress(request.getClientAddress().host)
if ip_address not in app_service.ip_range_whitelist:
- return None, None, None
+ return None
# This will always be set by the time Twisted calls us.
assert request.args is not None
@@ -377,13 +362,15 @@ class Auth:
Codes.EXCLUSIVE,
)
- return effective_user_id, effective_device_id, app_service
+ return create_requester(
+ effective_user_id, app_service=app_service, device_id=effective_device_id
+ )
async def get_user_by_access_token(
self,
token: str,
allow_expired: bool = False,
- ) -> TokenLookupResult:
+ ) -> Requester:
"""Validate access token and get user_id from it
Args:
@@ -400,9 +387,9 @@ class Auth:
# First look in the database to see if the access token is present
# as an opaque token.
- r = await self.store.get_user_by_access_token(token)
- if r:
- valid_until_ms = r.valid_until_ms
+ user_info = await self.store.get_user_by_access_token(token)
+ if user_info:
+ valid_until_ms = user_info.valid_until_ms
if (
not allow_expired
and valid_until_ms is not None
@@ -414,7 +401,20 @@ class Auth:
msg="Access token has expired", soft_logout=True
)
- return r
+ # Mark the token as used. This is used to invalidate old refresh
+ # tokens after some time.
+ await self.store.mark_access_token_as_used(user_info.token_id)
+
+ requester = create_requester(
+ user_id=user_info.user_id,
+ access_token_id=user_info.token_id,
+ is_guest=user_info.is_guest,
+ shadow_banned=user_info.shadow_banned,
+ device_id=user_info.device_id,
+ authenticated_entity=user_info.token_owner,
+ )
+
+ return requester
# If the token isn't found in the database, then it could still be a
# macaroon for a guest, so we check that here.
@@ -440,11 +440,12 @@ class Auth:
"Guest access token used for regular user"
)
- return TokenLookupResult(
+ return create_requester(
user_id=user_id,
is_guest=True,
# all guests get the same device id
device_id=GUEST_DEVICE_ID,
+ authenticated_entity=user_id,
)
except (
pymacaroons.exceptions.MacaroonException,
@@ -458,41 +459,33 @@ class Auth:
)
raise InvalidClientTokenError("Invalid access token passed.")
- def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService:
- token = self.get_access_token_from_request(request)
- service = self.store.get_app_service_by_token(token)
- if not service:
- logger.warning("Unrecognised appservice access token.")
- raise InvalidClientTokenError()
- request.requester = create_requester(service.sender, app_service=service)
- return service
-
- async def is_server_admin(self, user: UserID) -> bool:
+ async def is_server_admin(self, requester: Requester) -> bool:
"""Check if the given user is a local server admin.
Args:
- user: user to check
+ requester: The user making the request, according to the access token.
Returns:
True if the user is an admin
"""
- return await self.store.is_server_admin(user)
+ return await self.store.is_server_admin(requester.user)
- async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
+ async def check_can_change_room_list(
+ self, room_id: str, requester: Requester
+ ) -> bool:
"""Determine whether the user is allowed to edit the room's entry in the
published room list.
Args:
- room_id
- user
+ room_id: The room to check.
+ requester: The user making the request, according to the access token.
"""
- is_admin = await self.is_server_admin(user)
+ is_admin = await self.is_server_admin(requester)
if is_admin:
return True
- user_id = user.to_string()
- await self.check_user_in_room(room_id, user_id)
+ await self.check_user_in_room(room_id, requester)
# We currently require the user is a "moderator" in the room. We do this
# by checking if they would (theoretically) be able to change the
@@ -511,7 +504,9 @@ class Auth:
send_level = event_auth.get_send_level(
EventTypes.CanonicalAlias, "", power_level_event
)
- user_level = event_auth.get_user_power_level(user_id, auth_events)
+ user_level = event_auth.get_user_power_level(
+ requester.user.to_string(), auth_events
+ )
return user_level >= send_level
@@ -530,6 +525,7 @@ class Auth:
return bool(query_params) or bool(auth_headers)
@staticmethod
+ @cancellable
def get_access_token_from_request(request: Request) -> str:
"""Extracts the access_token from the request.
@@ -567,17 +563,18 @@ class Auth:
return query_params[0].decode("ascii")
+ @trace
async def check_user_in_room_or_world_readable(
- self, room_id: str, user_id: str, allow_departed_users: bool = False
+ self, room_id: str, requester: Requester, allow_departed_users: bool = False
) -> Tuple[str, Optional[str]]:
"""Checks that the user is or was in the room or the room is world
readable. If it isn't then an exception is raised.
Args:
- room_id: room to check
- user_id: user to check
- allow_departed_users: if True, accept users that were previously
- members but have now departed
+ room_id: The room to check.
+ requester: The user making the request, according to the access token.
+ allow_departed_users: If True, accept users that were previously
+ members but have now departed.
Returns:
Resolves to the current membership of the user in the room and the
@@ -592,7 +589,7 @@ class Auth:
# * The user is a guest user, and has joined the room
# else it will throw.
return await self.check_user_in_room(
- room_id, user_id, allow_departed_users=allow_departed_users
+ room_id, requester, allow_departed_users=allow_departed_users
)
except AuthError:
visibility = await self._storage_controllers.state.get_current_state_event(
@@ -607,6 +604,6 @@ class Auth:
raise UnstableSpecAuthError(
403,
"User %s not in room %s, and room previews are disabled"
- % (user_id, room_id),
+ % (requester.user, room_id),
errcode=Codes.NOT_JOINED,
)
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 789859e69e..bc04a0755b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -31,6 +31,9 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255
+# Constant value used for the pseudo-thread which is the main timeline.
+MAIN_TIMELINE: Final = "main"
+
class Membership:
@@ -122,6 +125,8 @@ class EventTypes:
MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
+ Reaction: Final = "m.reaction"
+
class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request"
@@ -216,11 +221,11 @@ class EventContentFields:
MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical"
# For "insertion" events to indicate what the next batch ID should be in
# order to connect to it
- MSC2716_NEXT_BATCH_ID: Final = "org.matrix.msc2716.next_batch_id"
+ MSC2716_NEXT_BATCH_ID: Final = "next_batch_id"
# Used on "batch" events to indicate which insertion event it connects to
- MSC2716_BATCH_ID: Final = "org.matrix.msc2716.batch_id"
+ MSC2716_BATCH_ID: Final = "batch_id"
# For "marker" events
- MSC2716_MARKER_INSERTION: Final = "org.matrix.msc2716.marker.insertion"
+ MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference"
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
@@ -257,7 +262,7 @@ class GuestAccess:
class ReceiptTypes:
READ: Final = "m.read"
- READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
+ READ_PRIVATE: Final = "m.read.private"
FULLY_READ: Final = "m.fully_read"
@@ -269,3 +274,14 @@ class PublicRoomsFilterFields:
GENERIC_SEARCH_TERM: Final = "generic_search_term"
ROOM_TYPES: Final = "room_types"
+
+
+class ApprovalNoticeMedium:
+ """Identifier for the medium this server will use to serve notice of approval for a
+ specific user's registration.
+
+ As defined in https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/m_not_approved/proposals/3866-user-not-approved-error.md
+ """
+
+ NONE = "org.matrix.msc3866.none"
+ EMAIL = "org.matrix.msc3866.email"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e6dea89c6d..e2cfcea0f2 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -100,6 +100,14 @@ class Codes(str, Enum):
UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"
+ # Returned for federation requests where we can't process a request as we
+ # can't ensure the sending server is in a room which is partial-stated on
+ # our side.
+ # Part of MSC3895.
+ UNABLE_DUE_TO_PARTIAL_STATE = "ORG.MATRIX.MSC3895_UNABLE_DUE_TO_PARTIAL_STATE"
+
+ USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
+
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
@@ -147,7 +155,13 @@ class RedirectException(CodeMessageException):
class SynapseError(CodeMessageException):
"""A base exception type for matrix errors which have an errcode and error
- message (as well as an HTTP status code).
+ message (as well as an HTTP status code). These often bubble all the way up to the
+ client API response so the error code and status often reach the client directly as
+ defined here. If the error doesn't make sense to present to a client, then it
+ probably shouldn't be a `SynapseError`. For example, if we contact another
+ homeserver over federation, we shouldn't automatically ferry response errors back to
+ the client on our end (a 500 from a remote server does not make sense to a client
+ when our server did not experience a 500).
Attributes:
errcode: Matrix error code e.g 'M_FORBIDDEN'
@@ -560,6 +574,20 @@ class UnredactedContentDeletedError(SynapseError):
return cs_error(self.msg, self.errcode, **extra)
+class NotApprovedError(SynapseError):
+ def __init__(
+ self,
+ msg: str,
+ approval_notice_medium: str,
+ ):
+ super().__init__(
+ code=403,
+ msg=msg,
+ errcode=Codes.USER_AWAITING_APPROVAL,
+ additional_fields={"approval_notice_medium": approval_notice_medium},
+ )
+
+
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server
interactions.
@@ -578,8 +606,20 @@ def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
class FederationError(RuntimeError):
- """This class is used to inform remote homeservers about erroneous
- PDUs they sent us.
+ """
+ Raised when we process an erroneous PDU.
+
+ There are two kinds of scenarios where this exception can be raised:
+
+ 1. We may pull an invalid PDU from a remote homeserver (e.g. during backfill). We
+ raise this exception to signal an error to the rest of the application.
+ 2. We may be pushed an invalid PDU as part of a `/send` transaction from a remote
+ homeserver. We raise so that we can respond to the transaction and include the
+       error string in the "PDU Processing Result". The message will likely be
+       ignored by the remote homeserver and is not machine-parseable since it's just
+       a string.
+
+ TODO: In the future, we should split these usage scenarios into their own error types.
FATAL: The remote server could not interpret the source event.
(e.g., it was missing a required field)
@@ -618,6 +658,27 @@ class FederationError(RuntimeError):
}
+class FederationPullAttemptBackoffError(RuntimeError):
+ """
+    Raised to indicate that we are deliberately not attempting to pull the given
+ event over federation because we've already done so recently and are backing off.
+
+ Attributes:
+        event_ids: The event IDs which we are refusing to pull
+ message: A custom error message that gives more context
+ """
+
+ def __init__(self, event_ids: List[str], message: Optional[str]):
+ self.event_ids = event_ids
+
+ if message:
+ error_message = message
+ else:
+ error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
+
+ super().__init__(error_message)
+
+
class HttpResponseException(CodeMessageException):
"""
Represents an HTTP-level failure of an outbound request
@@ -652,7 +713,7 @@ class HttpResponseException(CodeMessageException):
set to the reason code from the HTTP response.
Returns:
- SynapseError:
+ The error converted to a SynapseError.
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index b007147519..a9888381b4 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -36,14 +36,14 @@ from jsonschema import FormatChecker
from synapse.api.constants import EduTypes, EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
+from synapse.events import EventBase, relation_from_event
from synapse.types import JsonDict, RoomID, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -53,11 +53,17 @@ FILTER_SCHEMA = {
# check types are valid event types
"types": {"type": "array", "items": {"type": "string"}},
"not_types": {"type": "array", "items": {"type": "string"}},
+ # MSC3874, filtering /messages.
+ "org.matrix.msc3874.rel_types": {"type": "array", "items": {"type": "string"}},
+ "org.matrix.msc3874.not_rel_types": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
},
}
ROOM_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -71,7 +77,7 @@ ROOM_FILTER_SCHEMA = {
}
ROOM_EVENT_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -84,6 +90,8 @@ ROOM_EVENT_FILTER_SCHEMA = {
"contains_url": {"type": "boolean"},
"lazy_load_members": {"type": "boolean"},
"include_redundant_members": {"type": "boolean"},
+ "unread_thread_notifications": {"type": "boolean"},
+ "org.matrix.msc3773.unread_thread_notifications": {"type": "boolean"},
# Include or exclude events with the provided labels.
# cf https://github.com/matrix-org/matrix-doc/pull/2326
"org.matrix.labels": {"type": "array", "items": {"type": "string"}},
@@ -135,18 +143,18 @@ USER_FILTER_SCHEMA = {
},
},
},
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
}
@FormatChecker.cls_checks("matrix_room_id")
-def matrix_room_id_validator(room_id_str: str) -> RoomID:
- return RoomID.from_string(room_id_str)
+def matrix_room_id_validator(room_id: object) -> bool:
+ return isinstance(room_id, str) and RoomID.is_valid(room_id)
@FormatChecker.cls_checks("matrix_user_id")
-def matrix_user_id_validator(user_id_str: str) -> UserID:
- return UserID.from_string(user_id_str)
+def matrix_user_id_validator(user_id: object) -> bool:
+ return isinstance(user_id, str) and UserID.is_valid(user_id)
class Filtering:
@@ -240,6 +248,9 @@ class FilterCollection:
def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members
+ def unread_thread_notifications(self) -> bool:
+ return self._room_timeline_filter.unread_thread_notifications
+
async def filter_presence(
self, events: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
@@ -304,6 +315,16 @@ class Filter:
self.include_redundant_members = filter_json.get(
"include_redundant_members", False
)
+ self.unread_thread_notifications: bool = filter_json.get(
+ "unread_thread_notifications", False
+ )
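+        # Fall back to the unstable MSC3773 field name if the stable one is not set and
+        # the experimental feature is enabled.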
+ if (
+ not self.unread_thread_notifications
+ and hs.config.experimental.msc3773_enabled
+ ):
+ self.unread_thread_notifications = filter_json.get(
+ "org.matrix.msc3773.unread_thread_notifications", False
+ )
self.types = filter_json.get("types", None)
self.not_types = filter_json.get("not_types", [])
@@ -319,8 +340,15 @@ class Filter:
self.labels = filter_json.get("org.matrix.labels", None)
self.not_labels = filter_json.get("org.matrix.not_labels", [])
- self.related_by_senders = self.filter_json.get("related_by_senders", None)
- self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+ self.related_by_senders = filter_json.get("related_by_senders", None)
+ self.related_by_rel_types = filter_json.get("related_by_rel_types", None)
+
+ # For compatibility with _check_fields.
+ self.rel_types = None
+ self.not_rel_types = []
+ if hs.config.experimental.msc3874_enabled:
+ self.rel_types = filter_json.get("org.matrix.msc3874.rel_types", None)
+ self.not_rel_types = filter_json.get("org.matrix.msc3874.not_rel_types", [])
def filters_all_types(self) -> bool:
return "*" in self.not_types
@@ -371,11 +399,19 @@ class Filter:
# check if there is a string url field in the content for filtering purposes
labels = content.get(EventContentFields.LABELS, [])
+ # Check if the event has a relation.
+ rel_type = None
+ if isinstance(event, EventBase):
+ relation = relation_from_event(event)
+ if relation:
+ rel_type = relation.rel_type
+
field_matchers = {
"rooms": lambda v: room_id == v,
"senders": lambda v: sender == v,
"types": lambda v: _matches_wildcard(ev_type, v),
"labels": lambda v: v in labels,
+ "rel_types": lambda v: rel_type == v,
}
result = self._check_fields(field_matchers)
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 044c7d4926..511790c7c5 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -343,6 +343,7 @@ class RequestRatelimiter:
requester: Requester,
update: bool = True,
is_admin_redaction: bool = False,
+ n_actions: int = 1,
) -> None:
"""Ratelimits requests.
@@ -355,6 +356,8 @@ class RequestRatelimiter:
is_admin_redaction: Whether this is a room admin/moderator
redacting an event. If so then we may apply different
ratelimits depending on config.
+ n_actions: Multiplier for the number of actions to apply to the
+ rate limiter at once.
Raises:
LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
if is_admin_redaction and self.admin_redaction_ratelimiter:
# If we have separate config for admin redactions, use a separate
# ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+ await self.admin_redaction_ratelimiter.ratelimit(
+ requester, update=update, n_actions=n_actions
+ )
else:
# Override rate and burst count per-user
await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
+ n_actions=n_actions,
)
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 00e81b3afc..e37acb0f1e 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -19,18 +19,23 @@ import attr
class EventFormatVersions:
"""This is an internal enum for tracking the version of the event format,
- independently from the room version.
+ independently of the room version.
+
+ To reduce confusion, the event format versions are named after the room
+ versions that they were used or introduced in.
+ The concept of an 'event format version' is specific to Synapse (the
+    specification does not mention this term).
"""
- V1 = 1 # $id:server event id format
- V2 = 2 # MSC1659-style $hash event id format: introduced for room v3
- V3 = 3 # MSC1884-style $hash format: introduced for room v4
+ ROOM_V1_V2 = 1 # $id:server event id format: used for room v1 and v2
+ ROOM_V3 = 2 # MSC1659-style $hash event id format: used for room v3
+ ROOM_V4_PLUS = 3 # MSC1884-style $hash format: introduced for room v4
KNOWN_EVENT_FORMAT_VERSIONS = {
- EventFormatVersions.V1,
- EventFormatVersions.V2,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V1_V2,
+ EventFormatVersions.ROOM_V3,
+ EventFormatVersions.ROOM_V4_PLUS,
}
@@ -92,7 +97,7 @@ class RoomVersions:
V1 = RoomVersion(
"1",
RoomDisposition.STABLE,
- EventFormatVersions.V1,
+ EventFormatVersions.ROOM_V1_V2,
StateResolutionVersions.V1,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -110,7 +115,7 @@ class RoomVersions:
V2 = RoomVersion(
"2",
RoomDisposition.STABLE,
- EventFormatVersions.V1,
+ EventFormatVersions.ROOM_V1_V2,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -128,7 +133,7 @@ class RoomVersions:
V3 = RoomVersion(
"3",
RoomDisposition.STABLE,
- EventFormatVersions.V2,
+ EventFormatVersions.ROOM_V3,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -146,7 +151,7 @@ class RoomVersions:
V4 = RoomVersion(
"4",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -164,7 +169,7 @@ class RoomVersions:
V5 = RoomVersion(
"5",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=True,
@@ -182,7 +187,7 @@ class RoomVersions:
V6 = RoomVersion(
"6",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -200,7 +205,7 @@ class RoomVersions:
MSC2176 = RoomVersion(
"org.matrix.msc2176",
RoomDisposition.UNSTABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -218,7 +223,7 @@ class RoomVersions:
V7 = RoomVersion(
"7",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -236,7 +241,7 @@ class RoomVersions:
V8 = RoomVersion(
"8",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -254,7 +259,7 @@ class RoomVersions:
V9 = RoomVersion(
"9",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -269,28 +274,10 @@ class RoomVersions:
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
)
- MSC2716v3 = RoomVersion(
- "org.matrix.msc2716v3",
- RoomDisposition.UNSTABLE,
- EventFormatVersions.V3,
- StateResolutionVersions.V2,
- enforce_key_validity=True,
- special_case_aliases_auth=False,
- strict_canonicaljson=True,
- limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=True,
- msc2716_historical=True,
- msc2716_redactions=True,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
- )
MSC3787 = RoomVersion(
"org.matrix.msc3787",
RoomDisposition.UNSTABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -308,7 +295,7 @@ class RoomVersions:
V10 = RoomVersion(
"10",
RoomDisposition.STABLE,
- EventFormatVersions.V3,
+ EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -323,6 +310,24 @@ class RoomVersions:
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
)
+ MSC2716v4 = RoomVersion(
+ "org.matrix.msc2716v4",
+ RoomDisposition.UNSTABLE,
+ EventFormatVersions.ROOM_V4_PLUS,
+ StateResolutionVersions.V2,
+ enforce_key_validity=True,
+ special_case_aliases_auth=False,
+ strict_canonicaljson=True,
+ limit_notifications_power_levels=True,
+ msc2176_redaction_rules=False,
+ msc3083_join_rules=False,
+ msc3375_redaction_rules=False,
+ msc2403_knocking=True,
+ msc2716_historical=True,
+ msc2716_redactions=True,
+ msc3787_knock_restricted_join_rule=False,
+ msc3667_int_only_power_levels=False,
+ )
KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
@@ -338,9 +343,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
- RoomVersions.MSC2716v3,
RoomVersions.MSC3787,
RoomVersions.V10,
+ RoomVersions.MSC2716v4,
)
}
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index bd49fa6a5f..a918579f50 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,7 @@ FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
-SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+SERVER_KEY_PREFIX = "/_matrix/key"
MEDIA_R0_PREFIX = "/_matrix/media/r0"
MEDIA_V3_PREFIX = "/_matrix/media/v3"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 923891ae0d..a5aa2185a2 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -47,6 +47,7 @@ from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
+from twisted.web.resource import Resource
import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ManholeConfig
+from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
+from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -98,9 +100,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
func: Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
- # This type-ignore should be redundant once we use a mypy release with
- # https://github.com/python/mypy/pull/12668.
- _sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type]
+ _sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(
@@ -270,11 +270,36 @@ def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
"""
Start Prometheus metrics server.
"""
- from synapse.metrics import RegistryProxy, start_http_server
+ from prometheus_client import start_http_server as start_http_server_prometheus
+
+ from synapse.metrics import RegistryProxy
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
- start_http_server(port, addr=host, registry=RegistryProxy)
+ _set_prometheus_client_use_created_metrics(False)
+ start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
+
+
+def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
+ """
+ Sets whether prometheus_client should expose `_created`-suffixed metrics for
+ all gauges, histograms and summaries.
+ There is no programmatic way to disable this without poking at internals;
+ the proper way is to use an environment variable which prometheus_client
+ loads at import time.
+
+    The motivation for disabling these `_created` metrics is that they're not
+    useful, but still take up storage space in Prometheus.
+ """
+
+ import prometheus_client.metrics
+
+ if hasattr(prometheus_client.metrics, "_use_created"):
+ prometheus_client.metrics._use_created = new_value
+ else:
+ logger.error(
+ "Can't disable `_created` metrics in prometheus_client (brittle hack broken?)"
+ )
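
To make the effect of the `_use_created` toggle concrete, here is a minimal standalone sketch, not part of the patch, which assumes the same private `prometheus_client.metrics._use_created` attribute that the helper above pokes and may break on future prometheus_client releases.

    import prometheus_client.metrics
    from prometheus_client import REGISTRY, Counter, generate_latest

    # Flip the same internal flag the helper above patches. Newer releases also
    # honour a PROMETHEUS_DISABLE_CREATED_SERIES environment variable at import
    # time (name from memory; check your prometheus_client version).
    prometheus_client.metrics._use_created = False

    requests_seen = Counter("demo_requests", "Requests seen by the demo")
    requests_seen.inc()

    exposition = generate_latest(REGISTRY)
    assert b"demo_requests_total" in exposition
    assert b"demo_requests_created" not in exposition
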
def listen_manhole(
@@ -326,6 +351,55 @@ def listen_tcp(
return r # type: ignore[return-value]
+def listen_http(
+ listener_config: ListenerConfig,
+ root_resource: Resource,
+ version_string: str,
+ max_request_body_size: int,
+ context_factory: Optional[IOpenSSLContextFactory],
+ reactor: ISynapseReactor = reactor,
+) -> List[Port]:
+ port = listener_config.port
+ bind_addresses = listener_config.bind_addresses
+ tls = listener_config.tls
+
+ assert listener_config.http_options is not None
+
+ site_tag = listener_config.http_options.tag
+ if site_tag is None:
+ site_tag = str(port)
+
+ site = SynapseSite(
+ "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
+ site_tag,
+ listener_config,
+ root_resource,
+ version_string,
+ max_request_body_size=max_request_body_size,
+ reactor=reactor,
+ )
+ if tls:
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ bind_addresses,
+ port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d (TLS)", port)
+ else:
+ ports = listen_tcp(
+ bind_addresses,
+ port,
+ site,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d", port)
+ return ports
+
+
def listen_ssl(
bind_addresses: Collection[str],
port: int,
@@ -478,9 +552,10 @@ async def start(hs: "HomeServer") -> None:
setup_sentry(hs)
setup_sdnotify(hs)
- # If background tasks are running on the main process, start collecting the
- # phone home stats.
+ # If background tasks are running on the main process or this is the worker in
+ # charge of them, start collecting the phone home stats and shared usage metrics.
if hs.config.worker.run_background_tasks:
+ await hs.get_common_usage_metrics_manager().setup()
start_phone_stats_home(hs)
# We now freeze all allocated objects in the hopes that (almost)
@@ -526,7 +601,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
logger.warning(f)
else:
logger.debug(
- "New cache config. Was:\n %s\nNow:\n",
+ "New cache config. Was:\n %s\nNow:\n %s",
previous_cache_config.__dict__,
config.caches.__dict__,
)
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 8a583d3ec6..165d1c5db0 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -28,10 +28,6 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@ from synapse.storage.databases.main.appservice import (
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
- SlavedFilteringStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
- SlavedEventStore,
+ FilteringWorkerStore,
+ DeviceWorkerStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
- RegistrationWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
+ RegistrationWorkerStore,
RoomWorkerStore,
):
def __init__(
diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py
index 89eb07df27..8c0f4a57e7 100644
--- a/synapse/app/complement_fork_starter.py
+++ b/synapse/app/complement_fork_starter.py
@@ -51,11 +51,18 @@ import argparse
import importlib
import itertools
import multiprocessing
+import os
+import signal
import sys
-from typing import Any, Callable, List
+from types import FrameType
+from typing import Any, Callable, Dict, List, Optional
from twisted.internet.main import installReactor
+# a map of the original signal handlers, before we installed our custom ones.
+# We restore these in our child processes.
+_original_signal_handlers: Dict[int, Any] = {}
+
class ProxiedReactor:
"""
@@ -105,6 +112,11 @@ def _worker_entrypoint(
sys.argv = args
+ # reset the custom signal handlers that we installed, so that the children start
+ # from a clean slate.
+ for sig, handler in _original_signal_handlers.items():
+ signal.signal(sig, handler)
+
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
@@ -167,13 +179,29 @@ def main() -> None:
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
+ processes: List[multiprocessing.Process] = []
+
+ # Install signal handlers to propagate signals to all our children, so that they
+ # shut down cleanly. This also inhibits our own exit, but that's good: we want to
+ # wait until the children have exited.
+ def handle_signal(signum: int, frame: Optional[FrameType]) -> None:
+ print(
+ f"complement_fork_starter: Caught signal {signum}. Stopping children.",
+ file=sys.stderr,
+ )
+ for p in processes:
+ if p.pid:
+ os.kill(p.pid, signum)
+
+ for sig in (signal.SIGINT, signal.SIGTERM):
+ _original_signal_handlers[sig] = signal.signal(sig, handle_signal)
+
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
- processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
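
The signal-forwarding logic above (install handlers in the parent, forward SIGINT/SIGTERM to the children, restore the original handlers inside each child) can be exercised in isolation with a small sketch; the names below are illustrative only, and it assumes the default `fork` start method on Linux.

    import multiprocessing
    import os
    import signal
    import time
    from types import FrameType
    from typing import Any, Dict, Optional

    _original_handlers: Dict[int, Any] = {}

    def child_main() -> None:
        # children start from a clean slate, as in _worker_entrypoint above
        for sig, handler in _original_handlers.items():
            signal.signal(sig, handler)
        time.sleep(60)

    def main() -> None:
        children = [multiprocessing.Process(target=child_main) for _ in range(2)]

        def forward_signal(signum: int, frame: Optional[FrameType]) -> None:
            # propagate the signal; the parent then waits in join() below
            for p in children:
                if p.pid:
                    os.kill(p.pid, signum)

        for sig in (signal.SIGINT, signal.SIGTERM):
            _original_handlers[sig] = signal.signal(sig, forward_signal)

        for p in children:
            p.start()
        for p in children:
            p.join()

    if __name__ == "__main__":
        main()
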
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 42d1f6d219..46dc731696 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -14,21 +14,19 @@
# limitations under the License.
import logging
import sys
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List
-from twisted.internet import address
from twisted.web.resource import Resource
import synapse
import synapse.events
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
@@ -43,17 +41,9 @@ from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
@@ -65,6 +55,7 @@ from synapse.rest.client import (
push_rule,
read_marker,
receipts,
+ relations,
room,
room_batch,
room_keys,
@@ -75,12 +66,12 @@ from synapse.rest.client import (
versions,
voip,
)
-from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
+ KeyUploadServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.register import (
@@ -88,7 +79,7 @@ from synapse.rest.client.register import (
RegistrationTokenValidityRestServlet,
)
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -100,8 +91,16 @@ from synapse.storage.databases.main.appservice import (
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -110,118 +109,31 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
+from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
-from synapse.types import JsonDict
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker")
-class KeyUploadServlet(RestServlet):
- """An implementation of the `KeyUploadServlet` that responds to read only
- requests, but otherwise proxies through to the master instance.
- """
-
- PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
-
- def __init__(self, hs: HomeServer):
- """
- Args:
- hs: server
- """
- super().__init__()
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
- self.http_client = hs.get_simple_http_client()
- self.main_uri = hs.config.worker.worker_main_http_uri
-
- async def on_POST(
- self, request: SynapseRequest, device_id: Optional[str]
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
- user_id = requester.user.to_string()
- body = parse_json_object_from_request(request)
-
- if device_id is not None:
- # passing the device_id here is deprecated; however, we allow it
- # for now for compatibility with older clients.
- if requester.device_id is not None and device_id != requester.device_id:
- logger.warning(
- "Client uploading keys for a different device "
- "(logged in as %s, uploading for %s)",
- requester.device_id,
- device_id,
- )
- else:
- device_id = requester.device_id
-
- if device_id is None:
- raise SynapseError(
- 400, "To upload keys, you must pass device_id when authenticating"
- )
-
- if body:
- # They're actually trying to upload something, proxy to main synapse.
-
- # Proxy headers from the original request, such as the auth headers
- # (in case the access token is there) and the original IP /
- # User-Agent of the request.
- headers = {
- header: request.requestHeaders.getRawHeaders(header, [])
- for header in (b"Authorization", b"User-Agent")
- }
- # Add the previous hop to the X-Forwarded-For header.
- x_forwarded_for = request.requestHeaders.getRawHeaders(
- b"X-Forwarded-For", []
- )
- # we use request.client here, since we want the previous hop, not the
- # original client (as returned by request.getClientAddress()).
- if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
- previous_host = request.client.host.encode("ascii")
- # If the header exists, add to the comma-separated list of the first
- # instance of the header. Otherwise, generate a new header.
- if x_forwarded_for:
- x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
- x_forwarded_for.extend(x_forwarded_for[1:])
- else:
- x_forwarded_for = [previous_host]
- headers[b"X-Forwarded-For"] = x_forwarded_for
-
- # Replicate the original X-Forwarded-Proto header. Note that
- # XForwardedForRequest overrides isSecure() to give us the original protocol
- # used by the client, as opposed to the protocol used by our upstream proxy
- # - which is what we want here.
- headers[b"X-Forwarded-Proto"] = [
- b"https" if request.isSecure() else b"http"
- ]
-
- try:
- result = await self.http_client.post_json_get_json(
- self.main_uri + request.uri.decode("ascii"), body, headers=headers
- )
- except HttpResponseException as e:
- raise e.to_synapse_error() from e
- except RequestSendFailed as e:
- raise SynapseError(502, "Failed to talk to master") from e
-
- return 200, result
- else:
- # Just interested in counts.
- result = await self.store.count_e2e_one_time_keys(user_id, device_id)
- return 200, {"one_time_key_counts": result}
-
-
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
@@ -231,26 +143,36 @@ class GenericWorkerSlavedStore(
EndToEndRoomKeyStore,
PresenceStore,
DeviceInboxWorkerStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
+ DeviceWorkerStore,
TagsWorkerStore,
AccountDataWorkerStore,
- SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
- SlavedEventStore,
- SlavedKeyStore,
+ # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+ # the races it creates aren't too bad.
+ KeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
ProfileWorkerStore,
- SlavedFilteringStore,
+ FilteringWorkerStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
+ PusherWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
RegistrationWorkerStore,
SearchStore,
TransactionWorkerStore,
@@ -267,15 +189,9 @@ class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
- port = listener_config.port
- bind_addresses = listener_config.bind_addresses
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
-
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -308,6 +224,7 @@ class GenericWorkerServer(HomeServer):
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
room.register_servlets(self, resource, is_worker=True)
+ relations.register_servlets(self, resource)
room.register_deprecated_servlets(self, resource)
initial_sync.register_servlets(self, resource)
room_batch.register_servlets(self, resource)
@@ -323,13 +240,13 @@ class GenericWorkerServer(HomeServer):
presence.register_servlets(self, resource)
- resources.update({CLIENT_API_PREFIX: resource})
+ resources[CLIENT_API_PREFIX] = resource
resources.update(build_synapse_client_resource_tree(self))
- resources.update({"/.well-known": well_known_resource(self)})
+ resources["/.well-known"] = well_known_resource(self)
elif name == "federation":
- resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+ resources[FEDERATION_PREFIX] = TransportLayerServer(self)
elif name == "media":
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
@@ -357,16 +274,12 @@ class GenericWorkerServer(HomeServer):
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
@@ -377,23 +290,15 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())
- _base.listen_tcp(
- bind_addresses,
- port,
- SynapseSite(
- "synapse.access.http.%s" % (site_tag,),
- site_tag,
- listener_config,
- root_resource,
- self.version_string,
- max_request_body_size=max_request_body_size(self.config),
- reactor=self.get_reactor(),
- ),
+ _base.listen_http(
+ listener_config,
+ root_resource,
+ self.version_string,
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- logger.info("Synapse worker now listening on port %d", port)
-
def start_listening(self) -> None:
for listener in self.config.worker.worker_listeners:
if listener.type == "http":
@@ -412,7 +317,10 @@ class GenericWorkerServer(HomeServer):
"enable_metrics is not True!"
)
else:
- _base.listen_metrics(listener.bind_addresses, listener.port)
+ _base.listen_metrics(
+ listener.bind_addresses,
+ listener.port,
+ )
else:
logger.warning("Unsupported listener type: %s", listener.type)
@@ -441,6 +349,13 @@ def start(config_options: List[str]) -> None:
"synapse.app.user_dir",
)
+ if config.experimental.faster_joins_enabled:
+ raise ConfigError(
+ "You have enabled the experimental `faster_joins` config option, but it is "
+ "not compatible with worker deployments yet. Please disable `faster_joins` "
+ "or run Synapse as a single process deployment instead."
+ )
+
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 745e704141..b9be558c7e 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -31,20 +31,18 @@ from synapse.api.urls import (
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
STATIC_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
handle_startup_exception,
- listen_ssl,
- listen_tcp,
+ listen_http,
max_request_body_size,
redirect_stdio_to_logs,
register_start,
)
from synapse.config._base import ConfigError, format_config_error
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
@@ -54,15 +52,13 @@ from synapse.http.server import (
RootOptionsRedirectResource,
StaticResource,
)
-from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -85,8 +81,6 @@ class SynapseHomeServer(HomeServer):
self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
port = listener_config.port
- bind_addresses = listener_config.bind_addresses
- tls = listener_config.tls
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
site_tag = listener_config.http_options.tag
@@ -142,37 +136,15 @@ class SynapseHomeServer(HomeServer):
else:
root_resource = OptionsResource()
- site = SynapseSite(
- "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
- site_tag,
+ ports = listen_http(
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
- max_request_body_size=max_request_body_size(self.config),
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- if tls:
- # refresh_certificate should have been called before this.
- assert self.tls_server_context_factory is not None
- ports = listen_ssl(
- bind_addresses,
- port,
- site,
- self.tls_server_context_factory,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
- else:
- ports = listen_tcp(
- bind_addresses,
- port,
- site,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d", port)
-
return ports
def _configure_named_resource(
@@ -202,7 +174,7 @@ class SynapseHomeServer(HomeServer):
}
)
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.can_verify_email:
from synapse.rest.synapse.client.password_reset import (
PasswordResetSubmitTokenResource,
)
@@ -217,27 +189,22 @@ class SynapseHomeServer(HomeServer):
consent_resource: Resource = ConsentResource(self)
if compress:
consent_resource = gz_wrap(consent_resource)
- resources.update({"/_matrix/consent": consent_resource})
+ resources["/_matrix/consent"] = consent_resource
if name == "federation":
- resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+ federation_resource: Resource = TransportLayerServer(self)
+ if compress:
+ federation_resource = gz_wrap(federation_resource)
+ resources[FEDERATION_PREFIX] = federation_resource
if name == "openid":
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["static", "client"]:
- resources.update(
- {
- STATIC_PREFIX: StaticResource(
- os.path.join(os.path.dirname(synapse.__file__), "static")
- )
- }
+ resources[STATIC_PREFIX] = StaticResource(
+ os.path.join(os.path.dirname(synapse.__file__), "static")
)
if name in ["media", "federation", "client"]:
@@ -256,7 +223,7 @@ class SynapseHomeServer(HomeServer):
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "metrics" and self.config.metrics.enable_metrics:
metrics_resource: Resource = MetricsResource(RegistryProxy)
@@ -288,16 +255,6 @@ class SynapseHomeServer(HomeServer):
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
- elif listener.type == "replication":
- services = listen_tcp(
- listener.bind_addresses,
- listener.port,
- ReplicationStreamProtocolFactory(self),
- )
- for s in services:
- self.get_reactor().addSystemEventTrigger(
- "before", "shutdown", s.stopListening
- )
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@@ -305,7 +262,10 @@ class SynapseHomeServer(HomeServer):
"enable_metrics is not True!"
)
else:
- _base.listen_metrics(listener.bind_addresses, listener.port)
+ _base.listen_metrics(
+ listener.bind_addresses,
+ listener.port,
+ )
else:
# this shouldn't happen, as the listener type should have been checked
# during parsing
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index 40dbdace8e..53db1e85b3 100644
--- a/synapse/app/phone_stats_home.py
+++ b/synapse/app/phone_stats_home.py
@@ -32,15 +32,15 @@ logger = logging.getLogger("synapse.app.homeserver")
_stats_process: List[Tuple[int, "resource.struct_rusage"]] = []
# Gauges to expose monthly active user control metrics
-current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
+current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU")
current_mau_by_service_gauge = Gauge(
"synapse_admin_mau_current_mau_by_service",
"Current MAU by service",
["app_service"],
)
-max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
- "synapse_admin_mau:registered_reserved_users",
+ "synapse_admin_mau_registered_reserved_users",
"Registered users with reserved threepids",
)
@@ -51,6 +51,16 @@ async def phone_stats_home(
stats: JsonDict,
stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process,
) -> None:
+ """Collect usage statistics and send them to the configured endpoint.
+
+ Args:
+ hs: the HomeServer object to use for gathering usage data.
+ stats: the dict in which to store the statistics sent to the configured
+ endpoint. Mostly used in tests to figure out the data that is supposed to
+ be sent.
+ stats_process: statistics about resource usage of the process.
+ """
+
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
# Ensure the homeserver has started.
@@ -83,6 +93,7 @@ async def phone_stats_home(
#
store = hs.get_datastores().main
+ common_metrics = await hs.get_common_usage_metrics_manager().get_metrics()
stats["homeserver"] = hs.config.server.server_name
stats["server_context"] = hs.config.server.server_context
@@ -104,7 +115,7 @@ async def phone_stats_home(
room_count = await store.get_room_count()
stats["total_room_count"] = room_count
- stats["daily_active_users"] = await store.count_daily_users()
+ stats["daily_active_users"] = common_metrics.daily_active_users
stats["monthly_active_users"] = await store.count_monthly_users()
daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms()
stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0dfa00df44..500bdde3a9 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -172,12 +172,24 @@ class ApplicationService:
Returns:
True if this service would like to know about this room.
"""
- member_list = await store.get_users_in_room(
+ # We can use `get_local_users_in_room(...)` here because an application service
+ # can only be interested in local users of the server it's on (ignore any remote
+ # users that might match the user namespace regex).
+ #
+ # In the future, we can consider re-using
+ # `store.get_app_service_users_in_room` which is very similar to this
+ # function but performs slightly worse, because this function has an early
+ # escape-hatch as soon as it finds a single user that the appservice is
+ # interested in. The juice would be worth the squeeze if
+ # `store.get_app_service_users_in_room` was used in more places besides
+ # an experimental MSC. But for now we can avoid the extra work, since it
+ # would barely be used elsewhere.
+ local_user_ids = await store.get_local_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)
# check joined member events
- for user_id in member_list:
+ for user_id in local_user_ids:
if self.is_interested_in_user(user_id):
return True
return False
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 0963fb3bb4..60774b240d 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -120,7 +120,11 @@ class ApplicationServiceApi(SimpleHttpClient):
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
try:
- response = await self.get_json(uri, {"access_token": service.hs_token})
+ response = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -140,7 +144,11 @@ class ApplicationServiceApi(SimpleHttpClient):
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
try:
- response = await self.get_json(uri, {"access_token": service.hs_token})
+ response = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
@@ -181,7 +189,11 @@ class ApplicationServiceApi(SimpleHttpClient):
**fields,
b"access_token": service.hs_token,
}
- response = await self.get_json(uri, args=args)
+ response = await self.get_json(
+ uri,
+ args=args,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r", uri, response
@@ -217,7 +229,11 @@ class ApplicationServiceApi(SimpleHttpClient):
urllib.parse.quote(protocol),
)
try:
- info = await self.get_json(uri, {"access_token": service.hs_token})
+ info = await self.get_json(
+ uri,
+ {"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
if not _is_valid_3pe_metadata(info):
logger.warning(
@@ -313,6 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient):
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
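
The hunks above add an `Authorization: Bearer <hs_token>` header alongside the legacy `access_token` query parameter on every call to the application service. A rough standalone illustration of that dual authentication follows; the function name is hypothetical and it uses `requests` only to keep the sketch short, whereas Synapse itself goes through its own `SimpleHttpClient`.

    import urllib.parse

    import requests

    def query_appservice_user(as_url: str, hs_token: str, user_id: str) -> bool:
        uri = as_url + "/users/" + urllib.parse.quote(user_id)
        response = requests.get(
            uri,
            params={"access_token": hs_token},                # legacy query-string auth
            headers={"Authorization": f"Bearer {hs_token}"},  # header-based auth
            timeout=10,
        )
        # an empty JSON object in the body still means "the appservice knows this user"
        return response.ok
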
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 7c9cf403ef..1f6362aedd 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -20,6 +20,7 @@ import logging
import os
import re
from collections import OrderedDict
+from enum import Enum, auto
from hashlib import sha256
from textwrap import dedent
from typing import (
@@ -603,18 +604,44 @@ class RootConfig:
" may specify directories containing *.yaml files.",
)
- generate_group = parser.add_argument_group("Config generation")
- generate_group.add_argument(
+ # we nest the mutually-exclusive group inside another group so that the help
+ # text shows them in their own group.
+ generate_mode_group = parser.add_argument_group(
+ "Config generation mode",
+ )
+ generate_mode_exclusive = generate_mode_group.add_mutually_exclusive_group()
+ generate_mode_exclusive.add_argument(
+ # hidden option to make the type and default work
+ "--generate-mode",
+ help=argparse.SUPPRESS,
+ type=_ConfigGenerateMode,
+ default=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
+ )
+ generate_mode_exclusive.add_argument(
"--generate-config",
- action="store_true",
help="Generate a config file, then exit.",
+ action="store_const",
+ const=_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
+ dest="generate_mode",
)
- generate_group.add_argument(
+ generate_mode_exclusive.add_argument(
"--generate-missing-configs",
"--generate-keys",
- action="store_true",
help="Generate any missing additional config files, then exit.",
+ action="store_const",
+ const=_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
+ dest="generate_mode",
)
+ generate_mode_exclusive.add_argument(
+ "--generate-missing-and-run",
+ help="Generate any missing additional config files, then run. This is the "
+ "default behaviour.",
+ action="store_const",
+ const=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
+ dest="generate_mode",
+ )
+
+ generate_group = parser.add_argument_group("Details for --generate-config")
generate_group.add_argument(
"-H", "--server-name", help="The server name to generate a config file for."
)
@@ -670,11 +697,12 @@ class RootConfig:
config_dir_path = os.path.abspath(config_dir_path)
data_dir_path = os.getcwd()
- generate_missing_configs = config_args.generate_missing_configs
-
obj = cls(config_files)
- if config_args.generate_config:
+ if (
+ config_args.generate_mode
+ == _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT
+ ):
if config_args.report_stats is None:
parser.error(
"Please specify either --report-stats=yes or --report-stats=no\n\n"
@@ -732,11 +760,14 @@ class RootConfig:
)
% (config_path,)
)
- generate_missing_configs = True
config_dict = read_config_files(config_files)
- if generate_missing_configs:
- obj.generate_missing_files(config_dict, config_dir_path)
+ obj.generate_missing_files(config_dict, config_dir_path)
+
+ if config_args.generate_mode in (
+ _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
+ _ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
+ ):
return None
obj.parse_config_dict(
@@ -965,6 +996,12 @@ def read_file(file_path: Any, config_path: Iterable[str]) -> str:
raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
+class _ConfigGenerateMode(Enum):
+ GENERATE_MISSING_AND_RUN = auto()
+ GENERATE_MISSING_AND_EXIT = auto()
+ GENERATE_EVERYTHING_AND_EXIT = auto()
+
+
__all__ = [
"Config",
"RootConfig",
diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py
index d1335e77cd..b3972ede96 100644
--- a/synapse/config/account_validity.py
+++ b/synapse/config/account_validity.py
@@ -23,7 +23,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'account_validity' section. Support for this setting has been deprecated and will be
removed in a future version of Synapse. Server admins should instead use the new
-'custom_templates_directory' setting documented here:
+'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 2db8cfb005..eb4194a5a9 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -159,7 +159,7 @@ class CacheConfig(Config):
self.track_memory_usage = cache_config.get("track_memory_usage", False)
if self.track_memory_usage:
- check_requirements("cache_memory")
+ check_requirements("cache-memory")
expire_caches = cache_config.get("expire_caches", True)
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 7765c5b454..a3af35b7c4 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -18,7 +18,6 @@
import email.utils
import logging
import os
-from enum import Enum
from typing import Any
import attr
@@ -53,7 +52,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'email' section. Support for this setting has been deprecated and will be removed in a
future version of Synapse. Server admins should instead use the new
-'custom_templates_directory' setting documented here:
+'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""
@@ -136,40 +135,22 @@ class EmailConfig(Config):
self.email_enable_notifs = email_config.get("enable_notifs", False)
- self.threepid_behaviour_email = (
- # Have Synapse handle the email sending if account_threepid_delegates.email
- # is not defined
- # msisdn is currently always remote while Synapse does not support any method of
- # sending SMS messages
- ThreepidBehaviour.REMOTE
- if self.root.registration.account_threepid_delegate_email
- else ThreepidBehaviour.LOCAL
- )
-
if config.get("trust_identity_server_for_password_resets"):
raise ConfigError(
- 'The config option "trust_identity_server_for_password_resets" has been removed.'
- "Please consult the configuration manual at docs/usage/configuration/config_documentation.md for "
- "details and update your config file."
+ 'The config option "trust_identity_server_for_password_resets" '
+ "is no longer supported. Please remove it from the config file."
)
- self.local_threepid_handling_disabled_due_to_email_config = False
- if (
- self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
- and email_config == {}
- ):
- # We cannot warn the user this has happened here
- # Instead do so when a user attempts to reset their password
- self.local_threepid_handling_disabled_due_to_email_config = True
-
- self.threepid_behaviour_email = ThreepidBehaviour.OFF
+ # If we have email config settings, assume that we can verify ownership of
+ # email addresses.
+ self.can_verify_email = email_config != {}
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)
- if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.can_verify_email:
missing = []
if not self.email_notif_from:
missing.append("email.notif_from")
@@ -360,18 +341,3 @@ class EmailConfig(Config):
"Config option email.invite_client_location must be a http or https URL",
path=("email", "invite_client_location"),
)
-
-
-class ThreepidBehaviour(Enum):
- """
- Enum to define the behaviour of Synapse with regards to when it contacts an identity
- server for 3pid registration and password resets
-
- REMOTE = use an external server to send tokens
- LOCAL = send tokens ourselves
- OFF = disable registration via 3pid and password resets
- """
-
- REMOTE = "remote"
- LOCAL = "local"
- OFF = "off"
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index c2ecd977cd..d4b71d1673 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -12,12 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any
+from typing import Any, Optional
+
+import attr
from synapse.config._base import Config
from synapse.types import JsonDict
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class MSC3866Config:
+ """Configuration for MSC3866 (mandating approval for new users)"""
+
+ # Whether the base support for the approval process is enabled. This includes the
+ # ability for administrators to check and update the approval of users, even if no
+ # approval is currently required.
+ enabled: bool = False
+ # Whether to require that new users are approved by an admin before their account
+ # can be used. Note that this setting is ignored if 'enabled' is false.
+ require_approval_for_new_accounts: bool = False
+
+
class ExperimentalConfig(Config):
"""Config section for enabling experimental features"""
@@ -32,9 +47,6 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
- # MSC2285 (private read receipts)
- self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
-
# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
@@ -66,7 +78,8 @@ class ExperimentalConfig(Config):
# MSC3706 (server-side support for partial state in /send_join responses)
self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
- # experimental support for faster joins over federation (msc2775, msc3706)
+ # experimental support for faster joins over federation
+ # (MSC2775, MSC3706, MSC3895)
# requires a target server with msc3706_enabled enabled.
self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
@@ -74,19 +87,47 @@ class ExperimentalConfig(Config):
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
# MSC2654: Unread counts
+ #
+ # Note that enabling this will result in an incorrect unread count for
+ # previously calculated push actions.
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
- # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
- self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
-
- # MSC3772: A push rule for mutual relations.
- self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
+ # MSC3773: Thread notifications
+ self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
- # MSC3715: dir param on /relations.
- self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
+ # MSC3664: Pushrules to match on related events
+ self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
+
+ # MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
+ self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)
+
+ # MSC3866: M_USER_AWAITING_APPROVAL error code
+ raw_msc3866_config = experimental.get("msc3866", {})
+ self.msc3866 = MSC3866Config(**raw_msc3866_config)
+
+ # MSC3881: Remotely toggle push notifications for another client
+ self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
+
+ # MSC3882: Allow an existing session to sign in a new session
+ self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False)
+ self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True)
+ self.msc3882_token_timeout = self.parse_duration(
+ experimental.get("msc3882_token_timeout", "5m")
+ )
+
+ # MSC3874: Filtering /messages with rel_types / not_rel_types.
+ self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
+
+ # MSC3886: Simple client rendezvous capability
+ self.msc3886_endpoint: Optional[str] = experimental.get(
+ "msc3886_endpoint", None
+ )
+
+ # MSC3912: Relation-based redactions.
+ self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
deleted file mode 100644
index baa051fdd4..0000000000
--- a/synapse/config/groups.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
- section = "groups"
-
- def read_config(self, config: JsonDict, **kwargs: Any) -> None:
- self.enable_group_creation = config.get("enable_group_creation", False)
- self.group_creation_prefix = config.get("group_creation_prefix", "")
diff --git a/synapse/config/key.py b/synapse/config/key.py
index cc75efdf8f..f3dc4df695 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -217,7 +217,18 @@ class KeyConfig(Config):
signing_keys = self.read_file(signing_key_path, name)
try:
- return read_signing_keys(signing_keys.splitlines(True))
+ loaded_signing_keys = read_signing_keys(
+ [
+ signing_key_line
+ for signing_key_line in signing_keys.splitlines(keepends=False)
+ if signing_key_line.strip()
+ ]
+ )
+
+ if not loaded_signing_keys:
+ raise ConfigError(f"No signing keys in file {signing_key_path}")
+
+ return loaded_signing_keys
except Exception as e:
raise ConfigError("Error reading %s: %s" % (name, str(e)))
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 6c1f78f8df..5468b963a2 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
@@ -317,15 +317,16 @@ def setup_logging(
Set up the logging subsystem.
Args:
- config (LoggingConfig | synapse.config.worker.WorkerConfig):
- configuration data
+ config: configuration data
- use_worker_options (bool): True to use the 'worker_log_config' option
+ use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
"""
+ from twisted.internet import reactor
+
log_config_path = (
config.worker.worker_log_config
if use_worker_options
@@ -348,3 +349,4 @@ def setup_logging(
)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Instance name: %s", hs.get_instance_name())
+ logging.info("Twisted reactor: %s", type(reactor).__name__)
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 3b42be5b5b..8c1c9bd12d 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -42,6 +42,7 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
+
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 5418a332da..0bd83f4010 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"userinfo_endpoint": {"type": "string"},
"jwks_uri": {"type": "string"},
"skip_verification": {"type": "boolean"},
+ "backchannel_logout_enabled": {"type": "boolean"},
+ "backchannel_logout_ignore_sub": {"type": "boolean"},
"user_profile_method": {
"type": "string",
"enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
+ backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
+ backchannel_logout_ignore_sub=oidc_config.get(
+ "backchannel_logout_ignore_sub", False
+ ),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
# "openid" scope is used.
jwks_uri: Optional[str]
+ # Whether Synapse should react to backchannel logouts
+ backchannel_logout_enabled: bool
+
+ # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
+ backchannel_logout_ignore_sub: bool
+
# Whether to skip metadata verification
skip_verification: bool
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 1ed001e105..5c13fe428a 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -150,8 +150,5 @@ class RatelimitConfig(Config):
self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}),
- defaults={
- "per_second": self.rc_message.per_second,
- "burst_count": self.rc_message.burst_count,
- },
+ defaults={"per_second": 0.0025, "burst_count": 5},
)
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 01fb0331bc..df1d83dfaa 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -13,23 +13,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
-import logging
-from typing import Any, Optional
+from typing import Any, Dict, Optional
from synapse.api.constants import RoomCreationPreset
-from synapse.config._base import Config, ConfigError
+from synapse.config._base import Config, ConfigError, read_file
from synapse.types import JsonDict, RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool
-logger = logging.getLogger(__name__)
-
-LEGACY_EMAIL_DELEGATE_WARNING = """\
-Delegation of email verification to an identity server is now deprecated. To
+NO_EMAIL_DELEGATE_ERROR = """\
+Delegation of email verification to an identity server is no longer supported. To
continue to allow users to add email addresses to their accounts, and use them for
password resets, configure Synapse with an SMTP server via the `email` setting, and
remove `account_threepid_delegates.email`.
+"""
-This will be an error in a future version.
+CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
+You have configured both `registration_shared_secret` and
+`registration_shared_secret_path`. These are mutually incompatible.
"""
@@ -58,15 +58,22 @@ class RegistrationConfig(Config):
self.enable_registration_token_3pid_bypass = config.get(
"enable_registration_token_3pid_bypass", False
)
+
+ # read the shared secret, either inline or from an external file
self.registration_shared_secret = config.get("registration_shared_secret")
+ registration_shared_secret_path = config.get("registration_shared_secret_path")
+ if registration_shared_secret_path:
+ if self.registration_shared_secret:
+ raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR)
+ self.registration_shared_secret = read_file(
+ registration_shared_secret_path, ("registration_shared_secret_path",)
+ ).strip()
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
account_threepid_delegates = config.get("account_threepid_delegates") or {}
if "email" in account_threepid_delegates:
- logger.warning(LEGACY_EMAIL_DELEGATE_WARNING)
-
- self.account_threepid_delegate_email = account_threepid_delegates.get("email")
+ raise ConfigError(NO_EMAIL_DELEGATE_ERROR)
self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -225,6 +232,21 @@ class RegistrationConfig(Config):
else:
return ""
+ def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None:
+ # if 'registration_shared_secret_path' is specified, and the target file
+ # does not exist, generate it.
+ registration_shared_secret_path = config.get("registration_shared_secret_path")
+ if registration_shared_secret_path and not self.path_exists(
+ registration_shared_secret_path
+ ):
+ print(
+ "Generating registration shared secret file "
+ + registration_shared_secret_path
+ )
+ secret = random_string_with_symbols(50)
+ with open(registration_shared_secret_path, "w") as f:
+ f.write(f"{secret}\n")
+
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> None:
reg_group = parser.add_argument_group("registration")
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 1033496bb4..e4759711ed 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -205,7 +205,7 @@ class ContentRepositoryConfig(Config):
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
- check_requirements("url_preview")
+ check_requirements("url-preview")
proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 085fe22c51..ec46ca63ad 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -36,6 +36,12 @@ from ._util import validate_config
logger = logging.Logger(__name__)
+DIRECT_TCP_ERROR = """
+Using direct TCP replication for workers is no longer supported.
+
+Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis
+"""
+
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
@@ -165,7 +171,6 @@ KNOWN_LISTENER_TYPES = {
"http",
"metrics",
"manhole",
- "replication",
}
KNOWN_RESOURCES = {
@@ -201,6 +206,10 @@ class HttpListenerConfig:
resources: List[HttpResourceConfig] = attr.Factory(list)
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
+ request_id_header: Optional[str] = None
+ # If true, the listener will return CORS response headers compatible with MSC3886:
+ # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
+ experimental_cors_msc3886: bool = False
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -515,7 +524,11 @@ class ServerConfig(Config):
):
raise ConfigError("allowed_avatar_mimetypes must be a list")
- self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
+ listeners = config.get("listeners", [])
+ if not isinstance(listeners, list):
+ raise ConfigError("Expected a list", ("listeners",))
+
+ self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
# no_tls is not really supported any more, but let's grandfather it in
# here.
@@ -880,9 +893,15 @@ def read_gc_thresholds(
)
-def parse_listener_def(listener: Any) -> ListenerConfig:
+def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
+ if not isinstance(listener, dict):
+ raise ConfigError("Expected a dictionary", ("listeners", str(num)))
+
listener_type = listener["type"]
+ # Raise a helpful error if direct TCP replication is still configured.
+ if listener_type == "replication":
+ raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
port = listener.get("port")
if not isinstance(port, int):
@@ -918,6 +937,8 @@ def parse_listener_def(listener: Any) -> ListenerConfig:
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
+ request_id_header=listener.get("request_id_header"),
+ experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
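A minimal sketch of the listener-indexing convention introduced above: each entry's position is threaded into `parse_listener_def` so a `ConfigError` can point at the offending item (the error rendering below is illustrative only).

    listeners = [
        {"type": "http", "port": 8008},
        {"type": "replication", "port": 9092},  # no longer supported
    ]
    for i, listener in enumerate(listeners):
        if listener.get("type") == "replication":
            # Roughly the path carried by the raised ConfigError: ("listeners", "1", "type")
            print(f"bad config at listeners/{i}/type: direct TCP replication is unsupported")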
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index 2178cbf983..a452cc3a49 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -26,7 +26,7 @@ LEGACY_TEMPLATE_DIR_WARNING = """
This server's configuration file is using the deprecated 'template_dir' setting in the
'sso' section. Support for this setting has been deprecated and will be removed in a
future version of Synapse. Server admins should instead use the new
-'custom_templates_directory' setting documented here:
+'custom_template_directory' setting documented here:
https://matrix-org.github.io/synapse/latest/templates.html
---------------------------------------------------------------------------------------"""
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index f2716422b5..2580660b6c 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -27,21 +27,7 @@ from ._base import (
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
-from .server import ListenerConfig, parse_listener_def
-
-_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
-The send_federation config option must be disabled in the main
-synapse process before they can be run in a separate worker.
-
-Please add ``send_federation: false`` to the main config
-"""
-
-_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
-The start_pushers config option must be disabled in the main
-synapse process before they can be run in a separate worker.
-
-Please add ``start_pushers: false`` to the main config
-"""
+from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
@@ -67,6 +53,7 @@ class InstanceLocationConfig:
host: str
port: int
+ tls: bool = False
@attr.s
@@ -128,7 +115,8 @@ class WorkerConfig(Config):
self.worker_app = None
self.worker_listeners = [
- parse_listener_def(x) for x in config.get("worker_listeners", [])
+ parse_listener_def(i, x)
+ for i, x in enumerate(config.get("worker_listeners", []))
]
self.worker_daemonize = bool(config.get("worker_daemonize"))
self.worker_pid_file = config.get("worker_pid_file")
@@ -142,18 +130,31 @@ class WorkerConfig(Config):
self.worker_replication_host = config.get("worker_replication_host", None)
# The port on the main synapse for TCP replication
- self.worker_replication_port = config.get("worker_replication_port", None)
+ if "worker_replication_port" in config:
+ raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")
+ # The tls mode on the main synapse for HTTP replication endpoint.
+ # For backward compatibility this defaults to False.
+ self.worker_replication_http_tls = config.get(
+ "worker_replication_http_tls", False
+ )
+
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master"
+ # FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+ if self.worker_main_http_uri is not None:
+ logger.warning(
+ "The config option worker_main_http_uri is unused since Synapse 1.73. "
+ "It can be safely removed from your configuration."
+ )
# This option is really only here to support `--manhole` command line
# argument.
@@ -167,40 +168,12 @@ class WorkerConfig(Config):
)
)
- # Handle federation sender configuration.
- #
- # There are two ways of configuring which instances handle federation
- # sending:
- # 1. The old way where "send_federation" is set to false and running a
- # `synapse.app.federation_sender` worker app.
- # 2. Specifying the workers sending federation in
- # `federation_sender_instances`.
- #
-
- send_federation = config.get("send_federation", True)
-
- federation_sender_instances = config.get("federation_sender_instances")
- if federation_sender_instances is None:
- # Default to an empty list, which means "another, unknown, worker is
- # responsible for it".
- federation_sender_instances = []
-
- # If no federation sender instances are set we check if
- # `send_federation` is set, which means use master
- if send_federation:
- federation_sender_instances = ["master"]
-
- if self.worker_app == "synapse.app.federation_sender":
- if send_federation:
- # If we're running federation senders, and not using
- # `federation_sender_instances`, then we should have
- # explicitly set `send_federation` to false.
- raise ConfigError(
- _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
- )
-
- federation_sender_instances = [self.worker_name]
-
+ federation_sender_instances = self._worker_names_performing_this_duty(
+ config,
+ "send_federation",
+ "synapse.app.federation_sender",
+ "federation_sender_instances",
+ )
self.send_federation = self.instance_name in federation_sender_instances
self.federation_shard_config = ShardedWorkerHandlingConfig(
federation_sender_instances
@@ -267,27 +240,12 @@ class WorkerConfig(Config):
)
# Handle sharded push
- start_pushers = config.get("start_pushers", True)
- pusher_instances = config.get("pusher_instances")
- if pusher_instances is None:
- # Default to an empty list, which means "another, unknown, worker is
- # responsible for it".
- pusher_instances = []
-
- # If no pushers instances are set we check if `start_pushers` is
- # set, which means use master
- if start_pushers:
- pusher_instances = ["master"]
-
- if self.worker_app == "synapse.app.pusher":
- if start_pushers:
- # If we're running pushers, and not using
- # `pusher_instances`, then we should have explicitly set
- # `start_pushers` to false.
- raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
-
- pusher_instances = [self.instance_name]
-
+ pusher_instances = self._worker_names_performing_this_duty(
+ config,
+ "start_pushers",
+ "synapse.app.pusher",
+ "pusher_instances",
+ )
self.start_pushers = self.instance_name in pusher_instances
self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
@@ -410,6 +368,64 @@ class WorkerConfig(Config):
# (By this point, these are either the same value or only one is not None.)
return bool(new_option_should_run_here or legacy_option_should_run_here)
+ def _worker_names_performing_this_duty(
+ self,
+ config: Dict[str, Any],
+ legacy_option_name: str,
+ legacy_app_name: str,
+ modern_instance_list_name: str,
+ ) -> List[str]:
+ """
+ Retrieves the names of the workers handling a given duty, by either legacy
+ option or instance list.
+
+ There are two ways of configuring which instances handle a given duty, e.g.
+ for configuring pushers:
+
+ 1. The old way where "start_pushers" is set to false and running a
+ `synapse.app.pusher` worker app.
+ 2. Specifying the workers handling the duty in `pusher_instances`.
+
+ Args:
+ config: settings read from yaml.
+ legacy_option_name: the old way of enabling options. e.g. 'start_pushers'
+ legacy_app_name: The historical app name. e.g. 'synapse.app.pusher'
+ modern_instance_list_name: the string name of the new instance_list. e.g.
+ 'pusher_instances'
+
+ Returns:
+ A list of worker instance names handling the given duty.
+ """
+
+ legacy_option = config.get(legacy_option_name, True)
+
+ worker_instances = config.get(modern_instance_list_name)
+ if worker_instances is None:
+ # Default to an empty list, which means "another, unknown, worker is
+ # responsible for it".
+ worker_instances = []
+
+ # If no worker instances are set we check if the legacy option
+ # is set, which means use the main process.
+ if legacy_option:
+ worker_instances = ["master"]
+
+ if self.worker_app == legacy_app_name:
+ if legacy_option:
+ # If we're using `legacy_app_name`, and not using
+ # `modern_instance_list_name`, then we should have
+ # explicitly set `legacy_option_name` to false.
+ raise ConfigError(
+ f"The '{legacy_option_name}' config option must be disabled in "
+ "the main synapse process before they can be run in a separate "
+ "worker.\n"
+ f"Please add `{legacy_option_name}: false` to the main config.\n",
+ )
+
+ worker_instances = [self.worker_name]
+
+ return worker_instances
+
def read_arguments(self, args: argparse.Namespace) -> None:
# We support a bunch of command line arguments that override options in
# the config. A lot of these options have a worker_* prefix when running
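The refactor above reduces both the federation-sender and pusher cases to one rule, sketched standalone here with illustrative names (this is not the patch's API): an explicit instance list always wins; otherwise the legacy boolean decides whether the main process handles the duty.

    def resolve_duty_instances(config: dict, legacy_option: str, instance_list: str) -> list:
        instances = config.get(instance_list)
        if instances is not None:
            # An explicit instance list always wins.
            return instances
        # Legacy behaviour: the main process ("master") handles the duty unless disabled.
        return ["master"] if config.get(legacy_option, True) else []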
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 7520647d1e..23b799ac32 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -28,6 +28,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.events.utils import prune_event, prune_event_dict
+from synapse.logging.opentracing import trace
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@@ -35,6 +36,7 @@ logger = logging.getLogger(__name__)
Hasher = Callable[[bytes], "hashlib._Hash"]
+@trace
def check_event_content_hash(
event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> bool:
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index c88afb2986..ed15f88350 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -213,7 +213,7 @@ class Keyring:
def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int]]
- ) -> List[defer.Deferred]:
+ ) -> List["defer.Deferred[None]"]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
@@ -226,10 +226,9 @@ class Keyring:
valid.
Returns:
- List<Deferred[None]>: for each input triplet, a deferred indicating success
- or failure to verify each json object's signature for the given
- server_name. The deferreds run their callbacks in the sentinel
- logcontext.
+ For each input triplet, a deferred indicating success or failure to
+ verify each json object's signature for the given server_name. The
+ deferreds run their callbacks in the sentinel logcontext.
"""
return [
run_in_background(
@@ -858,7 +857,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
response = await self.client.get_json(
destination=server_name,
path="/_matrix/key/v2/server/"
- + urllib.parse.quote(requested_key_id),
+ + urllib.parse.quote(requested_key_id, safe=""),
ignore_backoff=True,
# we only give the remote server 10s to respond. It should be an
# easy request to handle, so if it doesn't reply within 10s, it's
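For context on the `safe=""` change: `urllib.parse.quote` leaves `/` unescaped by default, so a key ID containing a slash would leak into the request path. For example:

    from urllib.parse import quote

    quote("ed25519:a/b")           # 'ed25519%3Aa/b'  - the '/' survives into the URL path
    quote("ed25519:a/b", safe="")  # 'ed25519%3Aa%2Fb'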
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 389b0c5d53..bab31e33c5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -15,7 +15,18 @@
import logging
import typing
-from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+ Any,
+ Collection,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -109,7 +120,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
if not is_invite_via_3pid:
raise AuthError(403, "Event not signed by sender's server")
- if event.format_version in (EventFormatVersions.V1,):
+ if event.format_version in (EventFormatVersions.ROOM_V1_V2,):
# Only older room versions have event IDs to check.
event_id_domain = get_domain_from_id(event.event_id)
@@ -134,6 +145,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
async def check_state_independent_auth_rules(
store: _EventSourceStore,
event: "EventBase",
+ batched_auth_events: Optional[Mapping[str, "EventBase"]] = None,
) -> None:
"""Check that an event complies with auth rules that are independent of room state
@@ -143,6 +155,8 @@ async def check_state_independent_auth_rules(
Args:
store: the datastore; used to fetch the auth events for validation
event: the event being checked.
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
Raises:
AuthError if the checks fail
@@ -162,6 +176,9 @@ async def check_state_independent_auth_rules(
redact_behaviour=EventRedactBehaviour.as_is,
allow_rejected=True,
)
+ if batched_auth_events:
+ auth_events.update(batched_auth_events)
+
room_id = event.room_id
auth_dict: MutableStateMap[str] = {}
expected_auth_types = auth_types_for_event(event.room_version, event)
@@ -716,7 +733,7 @@ def check_redaction(
if user_level >= redact_level:
return False
- if room_version_obj.event_format == EventFormatVersions.V1:
+ if room_version_obj.event_format == EventFormatVersions.ROOM_V1_V2:
redacter_domain = get_domain_from_id(event.event_id)
if not isinstance(event.redacts, str):
return False
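An illustrative call shape for the new `batched_auth_events` parameter on `check_state_independent_auth_rules` above (the surrounding variable names are hypothetical): when persisting a batch, earlier events from the same batch that are not yet in the store can still be used to auth a later event.

    await check_state_independent_auth_rules(
        store,
        event,
        batched_auth_events={e.event_id: e for e in earlier_events_in_batch},
    )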
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 39ad2793d9..8aca9a3ab9 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -289,6 +289,10 @@ class _EventInternalMetadata:
"""
return self._dict.get("historical", False)
+ def is_notifiable(self) -> bool:
+ """Whether this event can trigger a push notification"""
+ return not self.is_outlier() or self.is_out_of_band_membership()
+
class EventBase(metaclass=abc.ABCMeta):
@property
@@ -442,7 +446,7 @@ class EventBase(metaclass=abc.ABCMeta):
class FrozenEvent(EventBase):
- format_version = EventFormatVersions.V1 # All events of this type are V1
+ format_version = EventFormatVersions.ROOM_V1_V2 # All events of this type are V1
def __init__(
self,
@@ -490,7 +494,7 @@ class FrozenEvent(EventBase):
class FrozenEventV2(EventBase):
- format_version = EventFormatVersions.V2 # All events of this type are V2
+ format_version = EventFormatVersions.ROOM_V3 # All events of this type are V2
def __init__(
self,
@@ -567,7 +571,7 @@ class FrozenEventV2(EventBase):
class FrozenEventV3(FrozenEventV2):
"""FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""
- format_version = EventFormatVersions.V3 # All events of this type are V3
+ format_version = EventFormatVersions.ROOM_V4_PLUS # All events of this type are V3
@property
def event_id(self) -> str:
@@ -593,15 +597,14 @@ def _event_type_from_format_version(
format_version: The event format version
Returns:
- type: A type that can be initialized as per the initializer of
- `FrozenEvent`
+ A type that can be initialized as per the initializer of `FrozenEvent`
"""
- if format_version == EventFormatVersions.V1:
+ if format_version == EventFormatVersions.ROOM_V1_V2:
return FrozenEvent
- elif format_version == EventFormatVersions.V2:
+ elif format_version == EventFormatVersions.ROOM_V3:
return FrozenEventV2
- elif format_version == EventFormatVersions.V3:
+ elif format_version == EventFormatVersions.ROOM_V4_PLUS:
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 17f624b68f..d62906043f 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -128,6 +128,7 @@ class EventBuilder:
state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self)
),
+ await_full_state=False,
)
auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids
@@ -137,7 +138,7 @@ class EventBuilder:
# The types of auth/prev events changes between event versions.
prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
- if format_version == EventFormatVersions.V1:
+ if format_version == EventFormatVersions.ROOM_V1_V2:
auth_events = await self._store.add_event_hashes(auth_event_ids)
prev_events = await self._store.add_event_hashes(prev_event_ids)
else:
@@ -167,7 +168,6 @@ class EventBuilder:
"content": self.content,
"unsigned": self.unsigned,
"depth": depth,
- "prev_state": [],
}
if self.is_state():
@@ -253,7 +253,7 @@ def create_local_event_from_event_dict(
time_now = int(clock.time_msec())
- if format_version == EventFormatVersions.V1:
+ if format_version == EventFormatVersions.ROOM_V1_V2:
event_dict["event_id"] = _create_event_id(clock, hostname)
event_dict["origin"] = hostname
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index b700cbbfa1..1c0e96bec7 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -11,11 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple
import attr
from frozendict import frozendict
-from typing_extensions import Literal
from synapse.appservice import ApplicationService
from synapse.events import EventBase
@@ -33,7 +32,7 @@ class EventContext:
Holds information relevant to persisting an event
Attributes:
- rejected: A rejection reason if the event was rejected, else False
+ rejected: A rejection reason if the event was rejected, else None
_state_group: The ID of the state group for this event. Note that state events
are persisted with a state group which includes the new event, so this is
@@ -66,7 +65,8 @@ class EventContext:
None does not necessarily mean that ``state_group`` does not have
a prev_group!
- If the event is a state event, this is normally the same as ``prev_group``.
+ If the event is a state event, this is normally the same as
+ ``state_group_before_event``.
If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
will always also be ``None``.
@@ -85,7 +85,7 @@ class EventContext:
"""
_storage: "StorageControllers"
- rejected: Union[Literal[False], str] = False
+ rejected: Optional[str] = None
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None
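With `rejected` now typed as `Optional[str]`, checking whether an event was rejected becomes an explicit `None` test instead of relying on the `False` sentinel, roughly (the handler name is a placeholder):

    if context.rejected is not None:
        # context.rejected carries the rejection reason.
        handle_rejected_event(event, reason=context.rejected)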
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 4a3bfb38f1..623a2c71ea 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -32,6 +32,7 @@ from typing_extensions import Literal
import synapse
from synapse.api.errors import Codes
+from synapse.logging.opentracing import trace
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
@@ -378,6 +379,7 @@ class SpamChecker:
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
+ @trace
async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[Tuple[Codes, JsonDict], str]:
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index ac91c5eb57..71853caad8 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -161,7 +161,7 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
add_fields(EventContentFields.MSC2716_BATCH_ID)
elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
- add_fields(EventContentFields.MSC2716_MARKER_INSERTION)
+ add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 27c8beba25..a6f0104396 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -45,7 +45,7 @@ class EventValidator:
"""
self.validate_builder(event)
- if event.format_version == EventFormatVersions.V1:
+ if event.format_version == EventFormatVersions.ROOM_V1_V2:
EventID.from_string(event.event_id)
required = [
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 2522bf78fc..6bd4742140 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Awaitable, Callable, Optional
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -23,6 +23,7 @@ from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
+from synapse.logging.opentracing import log_kv, trace
from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
@@ -55,8 +56,14 @@ class FederationBase:
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
+ @trace
async def _check_sigs_and_hash(
- self, room_version: RoomVersion, pdu: EventBase
+ self,
+ room_version: RoomVersion,
+ pdu: EventBase,
+ record_failure_callback: Optional[
+ Callable[[EventBase, str], Awaitable[None]]
+ ] = None,
) -> EventBase:
"""Checks that event is correctly signed by the sending server.
@@ -68,6 +75,11 @@ class FederationBase:
Args:
room_version: The room version of the PDU
pdu: the event to be checked
+ record_failure_callback: A callback to run whenever the given event
+ fails signature or hash checks. This includes exceptions
+ that would normally be thrown/raised but also things like
+ checking for event tampering where we just return the redacted
+ event.
Returns:
* the original event if the checks pass
@@ -78,7 +90,12 @@ class FederationBase:
InvalidEventSignatureError if the signature check failed. Nothing
will be logged in this case.
"""
- await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+ try:
+ await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+ except InvalidEventSignatureError as exc:
+ if record_failure_callback:
+ await record_failure_callback(pdu, str(exc))
+ raise exc
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
@@ -97,17 +114,40 @@ class FederationBase:
"Event %s seems to have been redacted; using our redacted copy",
pdu.event_id,
)
+ log_kv(
+ {
+ "message": "Event seems to have been redacted; using our redacted copy",
+ "event_id": pdu.event_id,
+ }
+ )
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id,
)
+ log_kv(
+ {
+ "message": "Event content has been tampered, redacting",
+ "event_id": pdu.event_id,
+ }
+ )
+ if record_failure_callback:
+ await record_failure_callback(
+ pdu, "Event content has been tampered with"
+ )
return redacted_event
spam_check = await self.spam_checker.check_event_for_spam(pdu)
if spam_check != self.spam_checker.NOT_SPAM:
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
+ log_kv(
+ {
+ "message": "Event contains spam, redacting (to save disk space) "
+ "as well as soft-failing (to stop using the event in prev_events)",
+ "event_id": pdu.event_id,
+ }
+ )
# we redact (to save disk space) as well as soft-failing (to stop
# using the event in prev_events).
redacted_event = prune_event(pdu)
@@ -117,6 +157,7 @@ class FederationBase:
return pdu
+@trace
async def _check_sigs_on_pdu(
keyring: Keyring, room_version: RoomVersion, pdu: EventBase
) -> None:
@@ -172,7 +213,7 @@ async def _check_sigs_on_pdu(
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
- if room_version.event_format == EventFormatVersions.V1:
+ if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
event_domain = get_domain_from_id(pdu.event_id)
if event_domain != sender_domain:
try:
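The `record_failure_callback` hook added above is just an awaitable taking the failing event and a human-readable cause; a hedged sketch of wiring one up (the instance names here are illustrative, the store method is the one used later in this patch):

    async def record_failure(event: EventBase, cause: str) -> None:
        await store.record_event_failed_pull_attempt(event.room_id, event.event_id, cause)

    checked_pdu = await federation_base._check_sigs_and_hash(room_version, pdu, record_failure)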
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 6a8d76529b..c4c0bc7315 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -61,6 +61,7 @@ from synapse.federation.federation_base import (
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
+from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@@ -79,6 +80,18 @@ PDU_RETRY_TIME_MS = 1 * 60 * 1000
T = TypeVar("T")
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class PulledPduInfo:
+ """
+ A result object that stores the PDU and info about it like which homeserver we
+ pulled it from (`pull_origin`)
+ """
+
+ pdu: EventBase
+ # Which homeserver we pulled the PDU from
+ pull_origin: str
+
+
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
we couldn't parse
@@ -113,7 +126,9 @@ class FederationClient(FederationBase):
self.hostname = hs.hostname
self.signing_key = hs.signing_key
- self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
+ # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
+ # (which server we pulled the event from)
+ self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
@@ -233,6 +248,8 @@ class FederationClient(FederationBase):
destination, content, timeout
)
+ @trace
+ @tag_args
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> Optional[List[EventBase]]:
@@ -275,7 +292,7 @@ class FederationClient(FederationBase):
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
# Check signatures and hash of pdus, removing any from the list that fail checks
- pdus[:] = await self._check_sigs_and_hash_and_fetch(
+ pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
dest, pdus, room_version=room_version
)
@@ -325,7 +342,17 @@ class FederationClient(FederationBase):
# Check signatures are correct.
try:
- signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
+
+ async def _record_failure_callback(
+ event: EventBase, cause: str
+ ) -> None:
+ await self.store.record_event_failed_pull_attempt(
+ event.room_id, event.event_id, cause
+ )
+
+ signed_pdu = await self._check_sigs_and_hash(
+ room_version, pdu, _record_failure_callback
+ )
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
@@ -335,13 +362,15 @@ class FederationClient(FederationBase):
return None
+ @trace
+ @tag_args
async def get_pdu(
self,
- destinations: Iterable[str],
+ destinations: Collection[str],
event_id: str,
room_version: RoomVersion,
timeout: Optional[int] = None,
- ) -> Optional[EventBase]:
+ ) -> Optional[PulledPduInfo]:
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -356,11 +385,11 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
- The requested PDU, or None if we were unable to find it.
+ The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
"""
logger.debug(
- "get_pdu: event_id=%s from destinations=%s", event_id, destinations
+ "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
)
# TODO: Rate limit the number of times we try and get the same event.
@@ -369,19 +398,25 @@ class FederationClient(FederationBase):
# it gets persisted to the database), so we cache the results of the lookup.
# Note that this is separate to the regular get_event cache which caches
# events once they have been persisted.
- event = self._get_pdu_cache.get(event_id)
+ get_pdu_cache_entry = self._get_pdu_cache.get(event_id)
+ event = None
+ pull_origin = None
+ if get_pdu_cache_entry:
+ event, pull_origin = get_pdu_cache_entry
# If we don't see the event in the cache, go try to fetch it from the
# provided remote federated destinations
- if not event:
+ else:
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
+ # TODO: We can probably refactor this to use `_try_destination_list`
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
logger.debug(
- "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ event_id,
destination,
last_attempt,
PDU_RETRY_TIME_MS,
@@ -396,43 +431,48 @@ class FederationClient(FederationBase):
room_version=room_version,
timeout=timeout,
)
+ pull_origin = destination
pdu_attempts[destination] = now
if event:
# Prime the cache
- self._get_pdu_cache[event.event_id] = event
+ self._get_pdu_cache[event.event_id] = (event, pull_origin)
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break
+ except NotRetryingDestination as e:
+ logger.info("get_pdu(event_id=%s): %s", event_id, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
+ event_id,
+ destination,
+ )
+ continue
except SynapseError as e:
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
- except FederationDeniedError as e:
- logger.info(str(e))
- continue
except Exception as e:
pdu_attempts[destination] = now
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- if not event:
+ if not event or not pull_origin:
return None
# `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -444,8 +484,10 @@ class FederationClient(FederationBase):
event.room_version,
)
- return event_copy
+ return PulledPduInfo(event_copy, pull_origin)
+ @trace
+ @tag_args
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
) -> Tuple[List[str], List[str]]:
@@ -465,6 +507,23 @@ class FederationClient(FederationBase):
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "state_event_ids",
+ str(state_event_ids),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "state_event_ids.length",
+ str(len(state_event_ids)),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "auth_event_ids",
+ str(auth_event_ids),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "auth_event_ids.length",
+ str(len(auth_event_ids)),
+ )
+
if not isinstance(state_event_ids, list) or not isinstance(
auth_event_ids, list
):
@@ -472,6 +531,8 @@ class FederationClient(FederationBase):
return state_event_ids, auth_event_ids
+ @trace
+ @tag_args
async def get_room_state(
self,
destination: str,
@@ -521,23 +582,28 @@ class FederationClient(FederationBase):
len(auth_event_map),
)
- valid_auth_events = await self._check_sigs_and_hash_and_fetch(
+ valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_event_map.values(), room_version
)
- valid_state_events = await self._check_sigs_and_hash_and_fetch(
- destination, state_event_map.values(), room_version
+ valid_state_events = (
+ await self._check_sigs_and_hash_for_pulled_events_and_fetch(
+ destination, state_event_map.values(), room_version
+ )
)
return valid_state_events, valid_auth_events
- async def _check_sigs_and_hash_and_fetch(
+ @trace
+ async def _check_sigs_and_hash_for_pulled_events_and_fetch(
self,
origin: str,
pdus: Collection[EventBase],
room_version: RoomVersion,
) -> List[EventBase]:
- """Checks the signatures and hashes of a list of events.
+ """
+ Checks the signatures and hashes of a list of pulled events we got from
+ federation and records any signature failures as failed pull attempts.
If a PDU fails its signature check then we check if we have it in
the database, and if not then request it from the sender's server (if that
@@ -560,17 +626,27 @@ class FederationClient(FederationBase):
Returns:
A list of PDUs that have valid signatures and hashes.
"""
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "pdus.length",
+ str(len(pdus)),
+ )
# We limit how many PDUs we check at once, as if we try to do hundreds
# of thousands of PDUs at once we see large memory spikes.
- valid_pdus = []
+ valid_pdus: List[EventBase] = []
+
+ async def _record_failure_callback(event: EventBase, cause: str) -> None:
+ await self.store.record_event_failed_pull_attempt(
+ event.room_id, event.event_id, cause
+ )
async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
pdu=pdu,
origin=origin,
room_version=room_version,
+ record_failure_callback=_record_failure_callback,
)
if valid_pdu:
@@ -580,11 +656,16 @@ class FederationClient(FederationBase):
return valid_pdus
+ @trace
+ @tag_args
async def _check_sigs_and_hash_and_fetch_one(
self,
pdu: EventBase,
origin: str,
room_version: RoomVersion,
+ record_failure_callback: Optional[
+ Callable[[EventBase, str], Awaitable[None]]
+ ] = None,
) -> Optional[EventBase]:
"""Takes a PDU and checks its signatures and hashes.
@@ -601,6 +682,11 @@ class FederationClient(FederationBase):
origin
pdu
room_version
+ record_failure_callback: A callback to run whenever the given event
+ fails signature or hash checks. This includes exceptions
+ that would normally be thrown/raised but also things like
+ checking for event tampering where we just return the redacted
+ event.
Returns:
The PDU (possibly redacted) if it has valid signatures and hashes.
@@ -608,29 +694,44 @@ class FederationClient(FederationBase):
"""
try:
- return await self._check_sigs_and_hash(room_version, pdu)
+ return await self._check_sigs_and_hash(
+ room_version, pdu, record_failure_callback
+ )
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
- "Checking local store/orgin server",
+ "Checking local store/origin server",
pdu.event_id,
e,
)
+ log_kv(
+ {
+ "message": "Signature on retrieved event was invalid. "
+ "Checking local store/origin server",
+ "event_id": pdu.event_id,
+ "InvalidEventSignatureError": e,
+ }
+ )
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
+ # If the PDU fails its signature check and we don't have it in our
+ # database, we then request it from the sender's server (if that is not the
+ # same as `origin`).
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
- res = await self.get_pdu(
+ pulled_pdu_info = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
timeout=10000,
)
+ if pulled_pdu_info is not None:
+ res = pulled_pdu_info.pdu
except SynapseError:
pass
@@ -650,7 +751,7 @@ class FederationClient(FederationBase):
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
- signed_auth = await self._check_sigs_and_hash_and_fetch(
+ signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_chain, room_version=room_version
)
@@ -732,6 +833,7 @@ class FederationClient(FederationBase):
)
for destination in destinations:
+ # We don't want to ask our own server for information we don't have
if destination == self.server_name:
continue
@@ -740,9 +842,21 @@ class FederationClient(FederationBase):
except (
RequestSendFailed,
InvalidResponseError,
- NotRetryingDestination,
) as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
+ # Skip to the next homeserver in the list to try.
+ continue
+ except NotRetryingDestination as e:
+ logger.info("%s: %s", description, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
+ description,
+ description,
+ destination,
+ )
+ continue
except UnsupportedRoomVersionError:
raise
except HttpResponseException as e:
@@ -862,9 +976,6 @@ class FederationClient(FederationBase):
# The protoevent received over the JSON wire may not have all
# the required fields. Lets just gloss over that because
# there's some we never care about
- if "prev_state" not in pdu_dict:
- pdu_dict["prev_state"] = []
-
ev = builder.create_local_event_from_event_dict(
self._clock,
self.hostname,
@@ -1146,7 +1257,7 @@ class FederationClient(FederationBase):
# Otherwise, consider it a legitimate error and raise.
err = e.to_synapse_error()
if self._is_unknown_endpoint(e, err):
- if room_version.event_format != EventFormatVersions.V1:
+ if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
raise SynapseError(
400,
"User's homeserver does not support this room version",
@@ -1223,7 +1334,7 @@ class FederationClient(FederationBase):
return resp[1]
async def send_knock(self, destinations: List[str], pdu: EventBase) -> JsonDict:
- """Attempts to send a knock event to given a list of servers. Iterates
+ """Attempts to send a knock event to a given list of servers. Iterates
through the list until one attempt succeeds.
Doing so will cause the remote server to add the event to the graph,
@@ -1360,7 +1471,7 @@ class FederationClient(FederationBase):
event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
- signed_events = await self._check_sigs_and_hash_and_fetch(
+ signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, events, room_version=room_version
)
except HttpResponseException as e:
@@ -1538,6 +1649,54 @@ class FederationClient(FederationBase):
return result
async def timestamp_to_event(
+ self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
+ ) -> Optional["TimestampToEventResponse"]:
+ """
+ Calls each remote federating server from `destinations` asking for their closest
+ event to the given timestamp in the given direction until we get a response.
+ Also validates the response to always return the expected keys or raises an
+ error.
+
+ Args:
+ destinations: The domains of homeservers to try fetching from
+ room_id: Room to fetch the event from
+ timestamp: The point in time (inclusive) we should navigate from in
+ the given direction to find the closest event.
+ direction: ["f"|"b"] to indicate whether we should navigate forward
+ or backward from the given timestamp to find the closest event.
+
+ Returns:
+ A parsed TimestampToEventResponse including the closest event_id
+ and origin_server_ts or None if no destination has a response.
+ """
+
+ async def _timestamp_to_event_from_destination(
+ destination: str,
+ ) -> TimestampToEventResponse:
+ return await self._timestamp_to_event_from_destination(
+ destination, room_id, timestamp, direction
+ )
+
+ try:
+ # Loop through each homeserver candidate until we get a successful response
+ timestamp_to_event_response = await self._try_destination_list(
+ "timestamp_to_event",
+ destinations,
+ # TODO: The requested timestamp may lie in a part of the
+ # event graph that the remote server *also* didn't have,
+ # in which case they will have returned another event
+ # which may be nowhere near the requested timestamp. In
+ # the future, we may need to reconcile that gap and ask
+ # other homeservers, and/or extend `/timestamp_to_event`
+ # to return events on *both* sides of the timestamp to
+ # help reconcile the gap faster.
+ _timestamp_to_event_from_destination,
+ )
+ return timestamp_to_event_response
+ except SynapseError:
+ return None
+
+ async def _timestamp_to_event_from_destination(
self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
"""
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 1d60137411..bb20af6e91 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -61,7 +61,12 @@ from synapse.logging.context import (
nested_logging_context,
run_in_background,
)
-from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
+from synapse.logging.opentracing import (
+ log_kv,
+ start_active_span_from_edu,
+ tag_args,
+ trace,
+)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@@ -69,6 +74,8 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -476,6 +483,14 @@ class FederationServer(FederationBase):
pdu_results[pdu.event_id] = await process_pdu(pdu)
async def process_pdu(pdu: EventBase) -> JsonDict:
+ """
+ Processes a pushed PDU sent to us via a `/send` transaction
+
+ Returns:
+ JsonDict representing a "PDU Processing Result" that will be bundled up
+ with the other processed PDUs in the `/send` transaction and sent back
+ to the remote homeserver.
+ """
event_id = pdu.event_id
with nested_logging_context(event_id):
try:
@@ -525,13 +540,10 @@ class FederationServer(FederationBase):
async def on_room_state_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
# we grab the linearizer to protect ourselves from servers which hammer
# us. In theory we might already have the response to this query
# in the cache so we could return it without waiting for the linearizer
@@ -547,19 +559,18 @@ class FederationServer(FederationBase):
return 200, resp
+ @trace
+ @tag_args
async def on_state_ids_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
if not event_id:
raise NotImplementedError("Specify an event")
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
resp = await self._state_ids_resp_cache.wrap(
(room_id, event_id),
self._on_state_ids_request_compute,
@@ -569,6 +580,8 @@ class FederationServer(FederationBase):
return 200, resp
+ @trace
+ @tag_args
async def _on_state_ids_request_compute(
self, room_id: str, event_id: str
) -> JsonDict:
@@ -680,8 +693,9 @@ class FederationServer(FederationBase):
state_event_ids: Collection[str]
servers_in_room: Optional[Collection[str]]
if caller_supports_partial_state:
+ summary = await self.store.get_room_summary(room_id)
state_event_ids = _get_event_ids_for_partial_state_join(
- event, prev_state_ids
+ event, prev_state_ids, summary
)
servers_in_room = await self.state.get_hosts_in_room_at_events(
room_id, event_ids=event.prev_event_ids()
@@ -754,6 +768,17 @@ class FederationServer(FederationBase):
The partial knock event.
"""
origin_host, _ = parse_server_name(origin)
+
+ if await self.store.is_partial_state_room(room_id):
+ # Before we do anything: check if the room is partial-stated.
+ # Note that at the time this check was added, `on_make_knock_request` would
+ # block due to https://github.com/matrix-org/synapse/issues/12997.
+ raise SynapseError(
+ 404,
+ "Unable to handle /make_knock right now; this server is not fully joined.",
+ errcode=Codes.NOT_FOUND,
+ )
+
await self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version(room_id)
@@ -810,7 +835,14 @@ class FederationServer(FederationBase):
context, self._room_prejoin_state_types
)
)
- return {"knock_state_events": stripped_room_state}
+ return {
+ "knock_room_state": stripped_room_state,
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also populate a 'knock_state_events' with the same content to
+ # support old instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ "knock_state_events": stripped_room_state,
+ }
async def _on_send_membership_event(
self, origin: str, content: JsonDict, membership_type: str, room_id: str
@@ -843,8 +875,25 @@ class FederationServer(FederationBase):
Codes.BAD_JSON,
)
+ # Note that get_room_version throws if the room does not exist here.
room_version = await self.store.get_room_version(room_id)
+ if await self.store.is_partial_state_room(room_id):
+ # If our server is still only partially joined, we can't give a complete
+ # response to /send_join, /send_knock or /send_leave.
+ # This is because we will not be able to provide the server list (for partial
+ # joins) or the full state (for full joins).
+ # Return a 404 as we would if we weren't in the room at all.
+ logger.info(
+ f"Rejecting /send_{membership_type} to %s because it's a partial state room",
+ room_id,
+ )
+ raise SynapseError(
+ 404,
+ f"Unable to handle /send_{membership_type} right now; this server is not fully joined.",
+ errcode=Codes.NOT_FOUND,
+ )
+
if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
raise SynapseError(
403,
@@ -918,6 +967,7 @@ class FederationServer(FederationBase):
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
async with self._server_linearizer.queue((origin, room_id)):
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
@@ -1448,6 +1498,7 @@ class FederationHandlerRegistry:
def _get_event_ids_for_partial_state_join(
join_event: EventBase,
prev_state_ids: StateMap[str],
+ summary: Dict[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be retuned in a partial_state send_join
@@ -1474,8 +1525,19 @@ def _get_event_ids_for_partial_state_join(
if current_membership_event_id is not None:
state_event_ids.add(current_membership_event_id)
- # TODO: return a few more members:
- # - those with invites
- # - those that are kicked? / banned
+ name_id = prev_state_ids.get((EventTypes.Name, ""))
+ canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
+ if not name_id and not canonical_alias_id:
+ # Also include the hero members of the room (for DM rooms without a title).
+ # To do this properly, we should select the correct subset of membership events
+ # from `prev_state_ids`. Instead, we are lazier and use the (cached)
+ # `get_room_summary` function, which is based on the current state of the room.
+ # This introduces races; we choose to ignore them because a) they should be rare
+ # and b) even if it's wrong, joining servers will get the full state eventually.
+ heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
+ for hero in heroes:
+ membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
+ if membership_event_id:
+ state_event_ids.add(membership_event_id)
return state_event_ids
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 94a65ac65f..fc1d8c88a7 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -62,12 +62,12 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
sent_pdus_destination_dist_count = Counter(
- "synapse_federation_client_sent_pdu_destinations:count",
+ "synapse_federation_client_sent_pdu_destinations_count",
"Number of PDUs queued for sending to one or more destinations",
)
sent_pdus_destination_dist_total = Counter(
- "synapse_federation_client_sent_pdu_destinations:total",
+ "synapse_federation_client_sent_pdu_destinations",
"Total number of PDUs queued for sending across all destinations",
)
@@ -353,21 +353,25 @@ class FederationSender(AbstractFederationSender):
last_token = await self.store.get_federation_out_pos("events")
(
next_token,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
+ ) = await self.store.get_all_new_event_ids_stream(
last_token, self._last_poked_id, limit=100
)
+ event_ids = event_to_received_ts.keys()
+ event_entries = await self.store.get_unredacted_events_from_cache_or_db(
+ event_ids
+ )
+
logger.debug(
"Handling %i -> %i: %i events to send (current id %i)",
last_token,
next_token,
- len(events),
+ len(event_entries),
self._last_poked_id,
)
- if not events and next_token >= self._last_poked_id:
+ if not event_entries and next_token >= self._last_poked_id:
logger.debug("All events processed")
break
@@ -430,7 +434,23 @@ class FederationSender(AbstractFederationSender):
# If there are no prev event IDs then the state is empty
# and so no remote servers in the room
destinations = set()
- else:
+
+ if destinations is None:
+ # During partial join we use the set of servers that we got
+ # when beginning the join. It's still possible that we send
+ # events to servers that left the room in the meantime, but
+ # we consider that an acceptable risk since it is only our own
+ # events that we leak and not other servers' ones.
+ partial_state_destinations = (
+ await self.store.get_partial_state_servers_at_join(
+ event.room_id
+ )
+ )
+
+ if len(partial_state_destinations) > 0:
+ destinations = partial_state_destinations
+
+ if destinations is None:
# We check the external cache for the destinations, which is
# stored per state group.
@@ -441,6 +461,19 @@ class FederationSender(AbstractFederationSender):
destinations = await self._external_cache.get(
"get_joined_hosts", str(sg)
)
+ if destinations is None:
+ # Add logging to help track down #13444
+ logger.info(
+ "Unexpectedly did not have cached destinations for %s / %s",
+ sg,
+ event.event_id,
+ )
+ else:
+ # Add logging to help track down #13444
+ logger.info(
+ "Unexpectedly did not have cached prev group for %s",
+ event.event_id,
+ )
if destinations is None:
try:
@@ -495,8 +528,14 @@ class FederationSender(AbstractFederationSender):
await handle_event(event)
events_by_room: Dict[str, List[EventBase]] = {}
- for event in events:
- events_by_room.setdefault(event.room_id, []).append(event)
+
+ for event_id in event_ids:
+ # `event_entries` is unsorted, so we have to iterate over `event_ids`
+ # to ensure the events are in the right order
+ event_cache = event_entries.get(event_id)
+ if event_cache:
+ event = event_cache.event
+ events_by_room.setdefault(event.room_id, []).append(event)
await make_deferred_yieldable(
defer.gatherResults(
@@ -511,9 +550,9 @@ class FederationSender(AbstractFederationSender):
logger.debug("Successfully handled up to %i", next_token)
await self.store.update_federation_out_pos("events", next_token)
- if events:
+ if event_entries:
now = self.clock.time_msec()
- ts = event_to_received_ts[events[-1].event_id]
+ ts = max(t for t in event_to_received_ts.values() if t)
assert ts is not None
synapse.metrics.event_processing_lag.labels(
@@ -523,7 +562,7 @@ class FederationSender(AbstractFederationSender):
"federation_sender"
).set(ts)
- events_processed_counter.inc(len(events))
+ events_processed_counter.inc(len(event_entries))
event_processing_loop_room_count.labels("federation_sender").inc(
len(events_by_room)
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 41d8b937af..3ae5e8634c 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -505,6 +505,7 @@ class PerDestinationQueue:
new_pdus = await filter_events_for_server(
self._storage_controllers,
self._destination,
+ self._server_name,
new_pdus,
redact=False,
)
@@ -646,29 +647,32 @@ class _TransactionQueueManager:
# We start by fetching device related EDUs, i.e device updates and to
# device messages. We have to keep 2 free slots for presence and rr_edus.
- limit = MAX_EDUS_PER_TRANSACTION - 2
-
- device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
- limit
- )
-
- if device_update_edus:
- self._device_list_id = dev_list_id
- else:
- self.queue._last_device_list_stream_id = dev_list_id
-
- limit -= len(device_update_edus)
+ device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
+ # We prioritize to-device messages so that existing encryption channels
+ # work. We also keep a few slots spare (by reducing the limit) so that
+ # we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
- ) = await self.queue._get_to_device_message_edus(limit)
+ ) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
+ device_edu_limit -= len(to_device_edus)
+
+ device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
+ device_edu_limit
+ )
+
+ if device_update_edus:
+ self._device_list_id = dev_list_id
+ else:
+ self.queue._last_device_list_stream_id = dev_list_id
+
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 32074b8ca6..a3cfc701cd 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -45,6 +45,7 @@ from synapse.federation.units import Transaction
from synapse.http.matrixfederationclient import ByteParser
from synapse.http.types import QueryParams
from synapse.types import JsonDict
+from synapse.util import ExceptionBundle
logger = logging.getLogger(__name__)
@@ -279,12 +280,11 @@ class TransportLayerClient:
Note that this does not append any events to any graphs.
Args:
- destination (str): address of remote homeserver
- room_id (str): room to join/leave
- user_id (str): user to be joined/left
- membership (str): one of join/leave
- params (dict[str, str|Iterable[str]]): Query parameters to include in the
- request.
+ destination: address of remote homeserver
+ room_id: room to join/leave
+ user_id: user to be joined/left
+ membership: one of join/leave
+ params: Query parameters to include in the request.
Returns:
Succeeds when we get a 2xx HTTP response. The result
@@ -926,8 +926,7 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
return len(data)
def finish(self) -> SendJoinResponse:
- for c in self._coros:
- c.close()
+ _close_coros(self._coros)
if self._response.event_dict:
self._response.event = make_event_from_dict(
@@ -970,6 +969,27 @@ class _StateParser(ByteParser[StateRequestResponse]):
return len(data)
def finish(self) -> StateRequestResponse:
- for c in self._coros:
- c.close()
+ _close_coros(self._coros)
return self._response
+
+
+def _close_coros(coros: Iterable[Generator[None, bytes, None]]) -> None:
+ """Close each of the given coroutines.
+
+ Always calls .close() on each coroutine, even if doing so raises an exception.
+ Any exceptions raised are aggregated into an ExceptionBundle.
+
+ :raises ExceptionBundle: if at least one coroutine fails to close.
+ """
+ exceptions = []
+ for c in coros:
+ try:
+ c.close()
+ except Exception as e:
+ exceptions.append(e)
+
+ if exceptions:
+ # raise from the first exception so that the traceback has slightly more context
+ raise ExceptionBundle(
+ f"There were {len(exceptions)} errors closing coroutines", exceptions
+ ) from exceptions[0]
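A minimal sketch of how the aggregation above behaves, assuming ExceptionBundle (imported at the top of this file) wraps a message plus the list of sub-exceptions:

    def _failing_coro():
        def gen():
            try:
                yield
            finally:
                # When close() is called, GeneratorExit is raised at the yield
                # above and this finally block raises its own error instead.
                raise RuntimeError("cleanup failed")

        g = gen()
        next(g)  # advance to the yield so close() has something to unwind
        return g

    try:
        _close_coros([_failing_coro(), _failing_coro()])
    except Exception as bundle:
        # Both close() failures are reported together, rather than the first
        # one masking the second.
        print(bundle)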
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index bb0f8d6b7b..cdaf0d5de7 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
-from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable
+from synapse.http.server import HttpServer, ServletCallback
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import run_in_background
@@ -34,6 +34,7 @@ from synapse.logging.opentracing import (
whitelisted_homeserver,
)
from synapse.types import JsonDict
+from synapse.util.cancellation import is_function_cancellable
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name
@@ -223,10 +224,10 @@ class BaseFederationServlet:
With arguments:
- origin (unicode|None): The authenticated server_name of the calling server,
+ origin (str|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed.
- content (unicode|None): decoded json body of the request. None if the
+ content (str|None): decoded json body of the request. None if the
request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
@@ -375,7 +376,7 @@ class BaseFederationServlet:
if code is None:
continue
- if is_method_cancellable(code):
+ if is_function_cancellable(code):
# The wrapper added by `self._wrap` will inherit the cancellable flag,
# but the wrapper itself does not support cancellation yet.
# Once resolved, the cancellation tests in
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index f7884bfbe0..205fd16daa 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -489,7 +489,7 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
room_version = content["room_version"]
event = content["event"]
- invite_room_state = content["invite_room_state"]
+ invite_room_state = content.get("invite_room_state", [])
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
@@ -499,6 +499,11 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
+
+ # We only store invite_room_state for internal use, so remove it before
+ # returning the event to the remote homeserver.
+ result["event"].get("unsigned", {}).pop("invite_room_state", None)
+
return 200, result
@@ -549,8 +554,7 @@ class FederationClientKeysClaimServlet(BaseFederationServerServlet):
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
- # TODO(paul): Why does this path alone end with "/?" optional?
- PATH = "/get_missing_events/(?P<room_id>[^/]*)/?"
+ PATH = "/get_missing_events/(?P<room_id>[^/]*)"
async def on_POST(
self,
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 0478448b47..fc21d58001 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -225,7 +225,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index d4fe7df533..5bf8e86387 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -32,6 +32,7 @@ class AdminHandler:
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
+ self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonDict:
connections = []
@@ -70,10 +71,15 @@ class AdminHandler:
"appservice_id",
"consent_server_notice_sent",
"consent_version",
+ "consent_ts",
"user_type",
"is_guest",
}
+ if self._msc3866_enabled:
+ # Only include the approved flag if support for MSC3866 is enabled.
+ user_info_to_return.add("approved")
+
# Restrict returned keys to a known set.
user_info_dict = {
key: value
@@ -94,6 +100,7 @@ class AdminHandler:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
+ user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
return user_info_dict
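The user-info response above is assembled by filtering the stored row against an explicit allow-list, so columns such as consent_ts (and approved, behind MSC3866) only show up once added to that set, while erased is attached separately afterwards. A toy illustration of the allow-list filter, with invented values:

    user_info_to_return = {"name", "admin", "consent_ts", "user_type"}
    stored_row = {
        "name": "@alice:example.org",
        "admin": 0,
        "consent_ts": 1667000000000,
        "password_hash": "redacted-for-illustration",
    }
    user_info_dict = {k: v for k, v in stored_row.items() if k in user_info_to_return}
    assert "password_hash" not in user_info_dict  # not in the allow-list, so dropped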
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 203b62e015..66f5b8d108 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -109,10 +109,13 @@ class ApplicationServicesHandler:
last_token = await self.store.get_appservice_last_pos()
(
upper_bound,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
- last_token, self.current_max, limit=100, get_prev_content=True
+ ) = await self.store.get_all_new_event_ids_stream(
+ last_token, self.current_max, limit=100
+ )
+
+ events = await self.store.get_events_as_list(
+ event_to_received_ts.keys(), get_prev_content=True
)
events_by_room: Dict[str, List[EventBase]] = {}
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index bfa5535044..8b9ef25d29 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@ from typing import (
import attr
import bcrypt
import unpaddedbase64
+from prometheus_client import Counter
from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@ from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
LoginError,
+ NotFoundError,
StoreError,
SynapseError,
UserDeactivatedError,
@@ -63,11 +65,14 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.storage.roommember import ProfileInfo
+from synapse.storage.databases.main.registration import (
+ LoginTokenExpired,
+ LoginTokenLookupResult,
+ LoginTokenReused,
+)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -81,6 +86,12 @@ logger = logging.getLogger(__name__)
INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+invalid_login_token_counter = Counter(
+ "synapse_user_login_invalid_login_tokens",
+ "Counts the number of rejected m.login.token on /login",
+ ["reason"],
+)
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -280,7 +291,7 @@ class AuthHandler:
that it isn't stolen by re-authenticating them.
Args:
- requester: The user, as given by the access token
+ requester: The user making the request, according to the access token.
request: The request sent by the client.
@@ -884,6 +895,25 @@ class AuthHandler:
return True
+ async def create_login_token_for_user_id(
+ self,
+ user_id: str,
+ duration_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+ ) -> str:
+ login_token = self.generate_login_token()
+ now = self._clock.time_msec()
+ expiry_ts = now + duration_ms
+ await self.store.add_login_token_to_user(
+ user_id=user_id,
+ token=login_token,
+ expiry_ts=expiry_ts,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+ return login_token
+
async def create_refresh_token_for_user_id(
self,
user_id: str,
@@ -1010,6 +1040,17 @@ class AuthHandler:
return res[0]
return None
+ async def is_user_approved(self, user_id: str) -> bool:
+ """Checks if a user is approved and therefore can be allowed to log in.
+
+ Args:
+ user_id: the user to check the approval status of.
+
+ Returns:
+ A boolean that is True if the user is approved, False otherwise.
+ """
+ return await self.store.is_user_approved(user_id)
+
async def _find_user_id_and_pwd_hash(
self, user_id: str
) -> Optional[Tuple[str, str]]:
@@ -1391,6 +1432,18 @@ class AuthHandler:
return None
return user_id
+ def generate_login_token(self) -> str:
+        """Generates an opaque string, for use as a short-term login token"""
+
+        # we use the following format for login tokens:
+ # syl_<random string>_<base62 crc check>
+
+ random_string = stringutils.random_string(20)
+ base = f"syl_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
def generate_access_token(self, for_user: UserID) -> str:
"""Generates an opaque string, for use as an access token"""
@@ -1417,16 +1470,17 @@ class AuthHandler:
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
- async def validate_short_term_login_token(
- self, login_token: str
- ) -> LoginTokenAttributes:
+ async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
try:
- res = self.macaroon_gen.verify_short_term_login_token(login_token)
- except Exception:
- raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
+ return await self.store.consume_login_token(login_token)
+ except LoginTokenExpired:
+ invalid_login_token_counter.labels("expired").inc()
+ except LoginTokenReused:
+ invalid_login_token_counter.labels("reused").inc()
+ except NotFoundError:
+ invalid_login_token_counter.labels("not found").inc()
- await self.auth_blocking.check_auth_blocking(res.user_id)
- return res
+ raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
async def delete_access_token(self, access_token: str) -> None:
"""Invalidate a single access token
@@ -1435,20 +1489,25 @@ class AuthHandler:
access_token: access token to be deleted
"""
- user_info = await self.auth.get_user_by_access_token(access_token)
+ token = await self.store.get_user_by_access_token(access_token)
+ if not token:
+ # At this point, the token should already have been fetched once by
+            # the caller, so this should not happen, unless there is a race
+            # condition between two delete requests.
+ raise SynapseError(HTTPStatus.UNAUTHORIZED, "Unrecognised access token")
await self.store.delete_access_token(access_token)
# see if any modules want to know about this
await self.password_auth_provider.on_logged_out(
- user_id=user_info.user_id,
- device_id=user_info.device_id,
+ user_id=token.user_id,
+ device_id=token.device_id,
access_token=access_token,
)
# delete pushers associated with this access token
- if user_info.token_id is not None:
+ if token.token_id is not None:
await self.hs.get_pusherpool().remove_pushers_by_access_token(
- user_info.user_id, (user_info.token_id,)
+ token.user_id, (token.token_id,)
)
async def delete_access_tokens_for_user(
@@ -1682,41 +1741,10 @@ class AuthHandler:
respond_with_html(request, 403, self._sso_account_deactivated_template)
return
- profile = await self.store.get_profileinfo(
+ user_profile_data = await self.store.get_profileinfo(
UserID.from_string(registered_user_id).localpart
)
- self._complete_sso_login(
- registered_user_id,
- auth_provider_id,
- request,
- client_redirect_url,
- extra_attributes,
- new_user=new_user,
- user_profile_data=profile,
- auth_provider_session_id=auth_provider_session_id,
- )
-
- def _complete_sso_login(
- self,
- registered_user_id: str,
- auth_provider_id: str,
- request: Request,
- client_redirect_url: str,
- extra_attributes: Optional[JsonDict] = None,
- new_user: bool = False,
- user_profile_data: Optional[ProfileInfo] = None,
- auth_provider_session_id: Optional[str] = None,
- ) -> None:
- """
- The synchronous portion of complete_sso_login.
-
- This exists purely for backwards compatibility of synapse.module_api.ModuleApi.
- """
-
- if user_profile_data is None:
- user_profile_data = ProfileInfo(None, None)
-
# Store any extra attributes which will be passed in the login response.
# Note that this is per-user so it may overwrite a previous value, this
# is considered OK since the newest SSO attributes should be most valid.
@@ -1727,7 +1755,7 @@ class AuthHandler:
)
# Create a login token
- login_token = self.macaroon_gen.generate_short_term_login_token(
+ login_token = await self.create_login_token_for_user_id(
registered_user_id,
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
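Taken together, the auth.py changes replace macaroon-based short-term login tokens with rows in the database. A hedged sketch of the resulting lifecycle using the handler methods added above (the wrapper function and example values are invented):

    async def sso_login_roundtrip(auth_handler, user_id: str) -> None:
        # 1. SSO completes: mint and persist an opaque `syl_...` token with a
        #    short expiry (2 minutes by default).
        login_token = await auth_handler.create_login_token_for_user_id(
            user_id, auth_provider_id="oidc"
        )

        # 2. The client posts the token back via m.login.token. Consuming it
        #    enforces expiry and single-use semantics in the database; rejected
        #    tokens bump `synapse_user_login_invalid_login_tokens` by reason.
        res = await auth_handler.consume_login_token(login_token)
        assert res.user_id == user_id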
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index 7163af8004..fc467bc7c1 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -130,6 +130,9 @@ class CasHandler:
except PartialDownloadError as pde:
# Twisted raises this error if the connection is closed,
# even if that's being used old-http style to signal end-of-data
+ # Assertion is for mypy's benefit. Error.response is Optional[bytes],
+ # but a PartialDownloadError should always have a non-None response.
+ assert pde.response is not None
body = pde.response
except HttpResponseException as e:
description = (
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 816e1a6d79..d74d135c0c 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -16,6 +16,7 @@ import logging
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Codes, Requester, UserID, create_requester
@@ -76,6 +77,9 @@ class DeactivateAccountHandler:
True if identity server supports removing threepids, otherwise False.
"""
+ # This can only be called on the main process.
+ assert isinstance(self._device_handler, DeviceHandler)
+
# Check if this user can be deactivated
if not await self._third_party_rules.check_can_deactivate_user(
user_id, by_admin
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 1a8379854c..b1e55e1b9e 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -45,13 +45,13 @@ from synapse.types import (
JsonDict,
StreamKeyType,
StreamToken,
- UserID,
get_domain_from_id,
get_verify_key_from_cross_signing_key,
)
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.cancellation import cancellable
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination
@@ -65,6 +65,8 @@ DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000
class DeviceWorkerHandler:
+ device_list_updater: "DeviceListWorkerUpdater"
+
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.hs = hs
@@ -74,6 +76,9 @@ class DeviceWorkerHandler:
self._state_storage = hs.get_storage_controllers().state
self._auth_handler = hs.get_auth_handler()
self.server_name = hs.hostname
+ self._msc3852_enabled = hs.config.experimental.msc3852_enabled
+
+ self.device_list_updater = DeviceListWorkerUpdater(hs)
@trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
@@ -98,6 +103,19 @@ class DeviceWorkerHandler:
log_kv(device_map)
return devices
+ async def get_dehydrated_device(
+ self, user_id: str
+ ) -> Optional[Tuple[str, JsonDict]]:
+ """Retrieve the information for a dehydrated device.
+
+ Args:
+ user_id: the user whose dehydrated device we are looking for
+ Returns:
+ a tuple whose first item is the device ID, and the second item is
+ the dehydrated device information
+ """
+ return await self.store.get_dehydrated_device(user_id)
+
@trace
async def get_device(self, user_id: str, device_id: str) -> JsonDict:
"""Retrieve the given device
@@ -123,9 +141,10 @@ class DeviceWorkerHandler:
return device
+ @cancellable
async def get_device_changes_in_shared_rooms(
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
- ) -> Collection[str]:
+ ) -> Set[str]:
"""Get the set of users whose devices have changed who share a room with
the given user.
"""
@@ -162,6 +181,7 @@ class DeviceWorkerHandler:
@trace
@measure_func("device.get_user_ids_changed")
+ @cancellable
async def get_user_ids_changed(
self, user_id: str, from_token: StreamToken
) -> JsonDict:
@@ -192,7 +212,9 @@ class DeviceWorkerHandler:
possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
- current_state_ids = await self._state_storage.get_current_state_ids(room_id)
+ current_state_ids = await self._state_storage.get_current_state_ids(
+ room_id, await_full_state=False
+ )
# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
@@ -231,7 +253,8 @@ class DeviceWorkerHandler:
# mapping from event_id -> state_dict
prev_state_ids = await self._state_storage.get_state_ids_for_events(
- event_ids
+ event_ids,
+ await_full_state=False,
)
# Check if we've joined the room? If so we just blindly add all the users to
@@ -267,11 +290,9 @@ class DeviceWorkerHandler:
possibly_left = possibly_changed | possibly_left
# Double check if we still share rooms with the given user.
- users_rooms = await self.store.get_rooms_for_users_with_stream_ordering(
- possibly_left
- )
+ users_rooms = await self.store.get_rooms_for_users(possibly_left)
for changed_user_id, entries in users_rooms.items():
- if any(e.room_id in room_ids for e in entries):
+ if any(rid in room_ids for rid in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
@@ -303,12 +324,26 @@ class DeviceWorkerHandler:
"self_signing_key": self_signing_key,
}
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ # TODO(faster_joins): worker mode support
+ # https://github.com/matrix-org/synapse/issues/12994
+ logger.error(
+            "Tried to handle device list state for a partial join: not supported on workers."
+ )
+
class DeviceHandler(DeviceWorkerHandler):
+ device_list_updater: "DeviceListUpdater"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.federation_sender = hs.get_federation_sender()
+ self._storage_controllers = hs.get_storage_controllers()
self.device_list_updater = DeviceListUpdater(hs, self)
@@ -319,8 +354,6 @@ class DeviceHandler(DeviceWorkerHandler):
self.device_list_updater.incoming_device_list_update,
)
- hs.get_distributor().observe("user_left_room", self.user_left_room)
-
# Whether `_handle_new_device_update_async` is currently processing.
self._handle_new_device_update_is_processing = False
@@ -564,14 +597,6 @@ class DeviceHandler(DeviceWorkerHandler):
StreamKeyType.DEVICE_LIST, position, users=[from_user_id]
)
- async def user_left_room(self, user: UserID, room_id: str) -> None:
- user_id = user.to_string()
- room_ids = await self.store.get_rooms_for_user(user_id)
- if not room_ids:
- # We no longer share rooms with this user, so we'll no longer
- # receive device updates. Mark this in DB.
- await self.store.mark_remote_user_device_list_as_unsubscribed(user_id)
-
async def store_dehydrated_device(
self,
user_id: str,
@@ -600,19 +625,6 @@ class DeviceHandler(DeviceWorkerHandler):
await self.delete_devices(user_id, [old_device_id])
return device_id
- async def get_dehydrated_device(
- self, user_id: str
- ) -> Optional[Tuple[str, JsonDict]]:
- """Retrieve the information for a dehydrated device.
-
- Args:
- user_id: the user whose dehydrated device we are looking for
- Returns:
- a tuple whose first item is the device ID, and the second item is
- the dehydrated device information
- """
- return await self.store.get_dehydrated_device(user_id)
-
async def rehydrate_device(
self, user_id: str, access_token: str, device_id: str
) -> dict:
@@ -676,13 +688,33 @@ class DeviceHandler(DeviceWorkerHandler):
hosts_already_sent_to: Set[str] = set()
try:
+ stream_id, room_id = await self.store.get_device_change_last_converted_pos()
+
while True:
self._handle_new_device_update_new_data = False
- rows = await self.store.get_uncoverted_outbound_room_pokes()
+ max_stream_id = self.store.get_device_stream_token()
+ rows = await self.store.get_uncoverted_outbound_room_pokes(
+ stream_id, room_id
+ )
if not rows:
# If the DB returned nothing then there is nothing left to
# do, *unless* a new device list update happened during the
# DB query.
+
+ # Advance `(stream_id, room_id)`.
+ # `max_stream_id` comes from *before* the query for unconverted
+ # rows, which means that any unconverted rows must have a larger
+ # stream ID.
+ if max_stream_id > stream_id:
+ stream_id, room_id = max_stream_id, ""
+ await self.store.set_device_change_last_converted_pos(
+ stream_id, room_id
+ )
+ else:
+ assert max_stream_id == stream_id
+ # Avoid moving `room_id` backwards.
+ pass
+
if self._handle_new_device_update_new_data:
continue
else:
@@ -693,9 +725,16 @@ class DeviceHandler(DeviceWorkerHandler):
# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
- joined_user_ids = await self.store.get_users_in_room(room_id)
- hosts = {get_domain_from_id(u) for u in joined_user_ids}
+ hosts = set(
+ await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
+ )
hosts.discard(self.server_name)
+ # For rooms with partial state, `hosts` is merely an
+ # approximation. When we transition to a full state room, we
+ # will have to send out device list updates to any servers we
+ # missed.
# Check if we've already sent this update to some hosts
if current_stream_id == stream_id:
@@ -705,7 +744,6 @@ class DeviceHandler(DeviceWorkerHandler):
user_id=user_id,
device_id=device_id,
room_id=room_id,
- stream_id=stream_id,
hosts=hosts,
context=opentracing_context,
)
@@ -739,18 +777,147 @@ class DeviceHandler(DeviceWorkerHandler):
hosts_already_sent_to.update(hosts)
current_stream_id = stream_id
+ # Advance `(stream_id, room_id)`.
+ _, _, room_id, stream_id, _ = rows[-1]
+ await self.store.set_device_change_last_converted_pos(
+ stream_id, room_id
+ )
+
finally:
self._handle_new_device_update_is_processing = False
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ # We defer to the device list updater to handle pending remote device
+ # list updates.
+ await self.device_list_updater.handle_room_un_partial_stated(room_id)
+
+ # Replay local updates.
+ (
+ join_event_id,
+ device_lists_stream_id,
+ ) = await self.store.get_join_event_id_and_device_lists_stream_id_for_partial_state(
+ room_id
+ )
+
+ # Get the local device list changes that have happened in the room since
+ # we started joining. If there are no updates there's nothing left to do.
+ changes = await self.store.get_device_list_changes_in_room(
+ room_id, device_lists_stream_id
+ )
+ local_changes = {(u, d) for u, d in changes if self.hs.is_mine_id(u)}
+ if not local_changes:
+ return
+
+ # Note: We have persisted the full state at this point, we just haven't
+ # cleared the `partial_room` flag.
+ join_state_ids = await self._state_storage.get_state_ids_for_event(
+ join_event_id, await_full_state=False
+ )
+ current_state_ids = await self.store.get_partial_current_state_ids(room_id)
+
+ # Now we need to work out all servers that might have been in the room
+ # at any point during our join.
+
+ # First we look for any membership states that have changed between the
+ # initial join and now...
+ all_keys = set(join_state_ids)
+ all_keys.update(current_state_ids)
+
+ potentially_changed_hosts = set()
+ for etype, state_key in all_keys:
+ if etype != EventTypes.Member:
+ continue
+
+ prev = join_state_ids.get((etype, state_key))
+ current = current_state_ids.get((etype, state_key))
+
+ if prev != current:
+ potentially_changed_hosts.add(get_domain_from_id(state_key))
+
+ # ... then we add all the hosts that are currently joined to the room...
+ current_hosts_in_room = await self.store.get_current_hosts_in_room(room_id)
+ potentially_changed_hosts.update(current_hosts_in_room)
+
+ # ... and finally we remove any hosts that we were told about, as we
+ # will have sent device list updates to those hosts when they happened.
+ known_hosts_at_join = await self.store.get_partial_state_servers_at_join(
+ room_id
+ )
+ potentially_changed_hosts.difference_update(known_hosts_at_join)
+
+ potentially_changed_hosts.discard(self.server_name)
+
+ if not potentially_changed_hosts:
+ # Nothing to do.
+ return
+
+ logger.info(
+ "Found %d changed hosts to send device list updates to",
+ len(potentially_changed_hosts),
+ )
+
+ for user_id, device_id in local_changes:
+ await self.store.add_device_list_outbound_pokes(
+ user_id=user_id,
+ device_id=device_id,
+ room_id=room_id,
+ hosts=potentially_changed_hosts,
+ context=None,
+ )
+
+ # Notify things that device lists need to be sent out.
+ self.notifier.notify_replication()
+ for host in potentially_changed_hosts:
+ self.federation_sender.send_device_messages(host, immediate=False)
+
def _update_device_from_client_ips(
device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
) -> None:
ip = client_ips.get((device["user_id"], device["device_id"]), {})
- device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})
+ device.update(
+ {
+ "last_seen_user_agent": ip.get("user_agent"),
+ "last_seen_ts": ip.get("last_seen"),
+ "last_seen_ip": ip.get("ip"),
+ }
+ )
-class DeviceListUpdater:
+class DeviceListWorkerUpdater:
+ "Handles incoming device list updates from federation and contacts the main process over replication"
+
+ def __init__(self, hs: "HomeServer"):
+ from synapse.replication.http.devices import (
+ ReplicationUserDevicesResyncRestServlet,
+ )
+
+ self._user_device_resync_client = (
+ ReplicationUserDevicesResyncRestServlet.make_client(hs)
+ )
+
+ async def user_device_resync(
+ self, user_id: str, mark_failed_as_stale: bool = True
+ ) -> Optional[JsonDict]:
+ """Fetches all devices for a user and updates the device cache with them.
+
+ Args:
+            user_id: The ID of the user whose device list will be updated.
+ mark_failed_as_stale: Whether to mark the user's device list as stale
+ if the attempt to resync failed.
+ Returns:
+            A dict with the device info, as found under the "devices" key in the
+            result of this request:
+ https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
+ """
+ return await self._user_device_resync_client(user_id=user_id)
+
+
+class DeviceListUpdater(DeviceListWorkerUpdater):
"Handles incoming device list updates from federation and updates the DB"
def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
@@ -826,6 +993,19 @@ class DeviceListUpdater:
)
return
+ # Check if we are partially joining any rooms. If so we need to store
+ # all device list updates so that we can handle them correctly once we
+ # know who is in the room.
+ # TODO(faster joins): this fetches and processes a bunch of data that we don't
+ # use. Could be replaced by a tighter query e.g.
+ # SELECT EXISTS(SELECT 1 FROM partial_state_rooms)
+ partial_rooms = await self.store.get_partial_state_room_resync_info()
+ if partial_rooms:
+ await self.store.add_remote_device_list_to_pending(
+ user_id,
+ device_id,
+ )
+
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
@@ -1165,3 +1345,35 @@ class DeviceListUpdater:
device_ids.append(verify_key.version)
return device_ids
+
+ async def handle_room_un_partial_stated(self, room_id: str) -> None:
+ """Handles sending appropriate device list updates in a room that has
+ gone from partial to full state.
+ """
+
+ pending_updates = (
+ await self.store.get_pending_remote_device_list_updates_for_room(room_id)
+ )
+
+ for user_id, device_id in pending_updates:
+ logger.info(
+ "Got pending device list update in room %s: %s / %s",
+ room_id,
+ user_id,
+ device_id,
+ )
+ position = await self.store.add_device_change_to_streams(
+ user_id,
+ [device_id],
+ room_ids=[room_id],
+ )
+
+ if not position:
+ # This should only happen if there are no updates, which
+ # shouldn't happen when we've passed in a non-empty set of
+ # device IDs.
+ continue
+
+ self.device_handler.notifier.on_new_event(
+ StreamKeyType.DEVICE_LIST, position, rooms=[room_id]
+ )
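The net effect of the device.py changes is a two-tier updater: workers get a DeviceListWorkerUpdater that proxies resyncs to the main process over replication, while the main process keeps the full DeviceListUpdater. A sketch of the dispatch this split enables (the factory below is illustrative, not Synapse's actual HomeServer wiring):

    def build_device_handler(hs):
        if hs.config.worker.worker_app is None:
            # Main process: full handler; its DeviceListUpdater writes device
            # list updates to the database directly.
            return DeviceHandler(hs)
        # Worker process: its DeviceListWorkerUpdater calls
        # ReplicationUserDevicesResyncRestServlet on the main process instead.
        return DeviceWorkerHandler(hs)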
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 09a7a4b238..2ea52257cb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -16,6 +16,8 @@ import logging
import string
from typing import TYPE_CHECKING, Iterable, List, Optional
+from typing_extensions import Literal
+
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (
AuthError,
@@ -30,7 +32,7 @@ from synapse.api.errors import (
from synapse.appservice import ApplicationService
from synapse.module_api import NOT_SPAM
from synapse.storage.databases.main.directory import RoomAliasMapping
-from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id
+from synapse.types import JsonDict, Requester, RoomAlias
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -83,8 +85,9 @@ class DirectoryHandler:
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
- users = await self.store.get_users_in_room(room_id)
- servers = {get_domain_from_id(u) for u in users}
+ servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
if not servers:
raise SynapseError(400, "Failed to get server list")
@@ -133,7 +136,7 @@ class DirectoryHandler:
else:
# Server admins are not subject to the same constraints as normal
# users when creating an alias (e.g. being in the room).
- is_admin = await self.auth.is_server_admin(requester.user)
+ is_admin = await self.auth.is_server_admin(requester)
if (self.require_membership and check_membership) and not is_admin:
rooms_for_user = await self.store.get_rooms_for_user(user_id)
@@ -197,7 +200,7 @@ class DirectoryHandler:
user_id = requester.user.to_string()
try:
- can_delete = await self._user_can_delete_alias(room_alias, user_id)
+ can_delete = await self._user_can_delete_alias(room_alias, requester)
except StoreError as e:
if e.code == 404:
raise NotFoundError("Unknown room alias")
@@ -287,8 +290,9 @@ class DirectoryHandler:
Codes.NOT_FOUND,
)
- users = await self.store.get_users_in_room(room_id)
- extra_servers = {get_domain_from_id(u) for u in users}
+ extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
servers_set = set(extra_servers) | set(servers)
# If this server is in the list of servers, return it first.
@@ -400,7 +404,9 @@ class DirectoryHandler:
# either no interested services, or no service with an exclusive lock
return True
- async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool:
+ async def _user_can_delete_alias(
+ self, alias: RoomAlias, requester: Requester
+ ) -> bool:
"""Determine whether a user can delete an alias.
One of the following must be true:
@@ -413,7 +419,7 @@ class DirectoryHandler:
"""
creator = await self.store.get_room_alias_creator(alias.to_string())
- if creator == user_id:
+ if creator == requester.user.to_string():
return True
# Resolve the alias to the corresponding room.
@@ -422,12 +428,13 @@ class DirectoryHandler:
if not room_id:
return False
- return await self.auth.check_can_change_room_list(
- room_id, UserID.from_string(user_id)
- )
+ return await self.auth.check_can_change_room_list(room_id, requester)
async def edit_published_room_list(
- self, requester: Requester, room_id: str, visibility: str
+ self,
+ requester: Requester,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Edit the entry of the room in the published room list.
@@ -449,9 +456,6 @@ class DirectoryHandler:
if requester.is_guest:
raise AuthError(403, "Guests cannot edit the published room list")
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
if visibility == "public" and not self.enable_room_list_search:
# The room list has been disabled.
raise AuthError(
@@ -463,7 +467,7 @@ class DirectoryHandler:
raise SynapseError(400, "Unknown room")
can_change_room_list = await self.auth.check_can_change_room_list(
- room_id, requester.user
+ room_id, requester
)
if not can_change_room_list:
raise AuthError(
@@ -503,7 +507,11 @@ class DirectoryHandler:
await self.store.set_room_is_public(room_id, making_public)
async def edit_published_appservice_room_list(
- self, appservice_id: str, network_id: str, room_id: str, visibility: str
+ self,
+ appservice_id: str,
+ network_id: str,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Add or remove a room from the appservice/network specific public
room list.
@@ -514,9 +522,6 @@ class DirectoryHandler:
room_id
visibility: either "public" or "private"
"""
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
await self.store.set_room_is_public_appservice(
room_id, appservice_id, network_id, visibility == "public"
)
@@ -528,10 +533,8 @@ class DirectoryHandler:
Get a list of the aliases that currently point to this room on this server
"""
# allow access to server admins and current members of the room
- is_admin = await self.auth.is_server_admin(requester.user)
+ is_admin = await self.auth.is_server_admin(requester)
if not is_admin:
- await self.auth.check_user_in_room_or_world_readable(
- room_id, requester.user.to_string()
- )
+ await self.auth.check_user_in_room_or_world_readable(room_id, requester)
return await self.store.get_aliases_for_room(room_id)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index c938339ddd..5fe102e2f2 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -27,9 +27,9 @@ from twisted.internet import defer
from synapse.api.constants import EduTypes
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
-from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import (
JsonDict,
UserID,
@@ -37,7 +37,8 @@ from synapse.types import (
get_verify_key_from_cross_signing_key,
)
from synapse.util import json_decoder, unwrapFirstError
-from synapse.util.async_helpers import Linearizer
+from synapse.util.async_helpers import Linearizer, delay_cancellation
+from synapse.util.cancellation import cancellable
from synapse.util.retryutils import NotRetryingDestination
if TYPE_CHECKING:
@@ -48,33 +49,30 @@ logger = logging.getLogger(__name__)
class E2eKeysHandler:
def __init__(self, hs: "HomeServer"):
+ self.config = hs.config
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
- self._edu_updater = SigningKeyEduUpdater(hs, self)
-
federation_registry = hs.get_federation_registry()
- self._is_master = hs.config.worker.worker_app is None
- if not self._is_master:
- self._user_device_resync_client = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
- )
- else:
+ is_master = hs.config.worker.worker_app is None
+ if is_master:
+ edu_updater = SigningKeyEduUpdater(hs)
+
# Only register this edu handler on master as it requires writing
# device updates to the db
federation_registry.register_edu_handler(
EduTypes.SIGNING_KEY_UPDATE,
- self._edu_updater.incoming_signing_key_update,
+ edu_updater.incoming_signing_key_update,
)
# also handle the unstable version
# FIXME: remove this when enough servers have upgraded
federation_registry.register_edu_handler(
EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
- self._edu_updater.incoming_signing_key_update,
+ edu_updater.incoming_signing_key_update,
)
# doesn't really work as part of the generic query API, because the
@@ -91,6 +89,7 @@ class E2eKeysHandler:
)
@trace
+ @cancellable
async def query_devices(
self,
query_body: JsonDict,
@@ -173,6 +172,35 @@ class E2eKeysHandler:
user_ids_not_in_cache,
remote_results,
) = await self.store.get_user_devices_from_cache(query_list)
+
+ # Check that the homeserver still shares a room with all cached users.
+ # Note that this check may be slightly racy when a remote user leaves a
+ # room after we have fetched their cached device list. In the worst case
+ # we will do extra federation queries for devices that we had cached.
+ cached_users = set(remote_results.keys())
+ valid_cached_users = (
+ await self.store.get_users_server_still_shares_room_with(
+ remote_results.keys()
+ )
+ )
+ invalid_cached_users = cached_users - valid_cached_users
+ if invalid_cached_users:
+ # Fix up results. If we get here, it means there was either a bug in
+ # device list tracking, or we hit the race mentioned above.
+ # TODO: In practice, this path is hit fairly often in existing
+ # deployments when clients query the keys of departed remote
+ # users. A background update to mark the appropriate device
+ # lists as unsubscribed is needed.
+ # https://github.com/matrix-org/synapse/issues/13651
+ # Note that this currently introduces a failure mode when clients
+ # are trying to decrypt old messages from a remote user whose
+ # homeserver is no longer available. We may want to consider falling
+ # back to the cached data when we fail to retrieve a device list
+ # over federation for such remote users.
+ user_ids_not_in_cache.update(invalid_cached_users)
+ for invalid_user_id in invalid_cached_users:
+ remote_results.pop(invalid_user_id)
+
for user_id, devices in remote_results.items():
user_devices = results.setdefault(user_id, {})
for device_id, device in devices.items():
@@ -208,22 +236,26 @@ class E2eKeysHandler:
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
+ # TODO It might make sense to propagate cancellations into the
+ # deferreds which are querying remote homeservers.
await make_deferred_yieldable(
- defer.gatherResults(
- [
- run_in_background(
- self._query_devices_for_destination,
- results,
- cross_signing_keys,
- failures,
- destination,
- queries,
- timeout,
- )
- for destination, queries in remote_queries_not_in_cache.items()
- ],
- consumeErrors=True,
- ).addErrback(unwrapFirstError)
+ delay_cancellation(
+ defer.gatherResults(
+ [
+ run_in_background(
+ self._query_devices_for_destination,
+ results,
+ cross_signing_keys,
+ failures,
+ destination,
+ queries,
+ timeout,
+ )
+ for destination, queries in remote_queries_not_in_cache.items()
+ ],
+ consumeErrors=True,
+ ).addErrback(unwrapFirstError)
+ )
)
ret = {"device_keys": results, "failures": failures}
@@ -283,14 +315,13 @@ class E2eKeysHandler:
# probably be tracking their device lists. However, we haven't
# done an initial sync on the device list so we do it now.
try:
- if self._is_master:
- resync_results = await self.device_handler.device_list_updater.user_device_resync(
+ resync_results = (
+ await self.device_handler.device_list_updater.user_device_resync(
user_id
)
- else:
- resync_results = await self._user_device_resync_client(
- user_id=user_id
- )
+ )
+ if resync_results is None:
+ raise ValueError("Device resync failed")
# Add the device keys to the results.
user_devices = resync_results["devices"]
@@ -347,6 +378,7 @@ class E2eKeysHandler:
return
+ @cancellable
async def get_cross_signing_keys_from_cache(
self, query: Iterable[str], from_user_id: Optional[str]
) -> Dict[str, Dict[str, dict]]:
@@ -393,14 +425,19 @@ class E2eKeysHandler:
}
@trace
+ @cancellable
async def query_local_devices(
- self, query: Mapping[str, Optional[List[str]]]
+ self,
+ query: Mapping[str, Optional[List[str]]],
+ include_displaynames: bool = True,
) -> Dict[str, Dict[str, dict]]:
"""Get E2E device keys for local users
Args:
query: map from user_id to a list
of devices to query (None for all devices)
+ include_displaynames: Whether to include device displaynames in the returned
+ device details.
Returns:
A map from user_id -> device_id -> device details
@@ -432,7 +469,9 @@ class E2eKeysHandler:
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
- results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
+ results = await self.store.get_e2e_device_keys_for_cs_api(
+ local_query, include_displaynames
+ )
# Build the result structure
for user_id, device_keys in results.items():
@@ -445,11 +484,33 @@ class E2eKeysHandler:
async def on_federation_query_client_keys(
self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
) -> JsonDict:
- """Handle a device key query from a federated server"""
+ """Handle a device key query from a federated server:
+
+ Handles the path: GET /_matrix/federation/v1/users/keys/query
+
+ Args:
+ query_body: The body of the query request. Should contain a key
+                "device_keys" that maps to a dictionary of user IDs -> list of
+ device IDs. If the list of device IDs is empty, all devices of
+ that user will be queried.
+
+ Returns:
+ A json dictionary containing the following:
+ - device_keys: A dictionary containing the requested device information.
+ - master_keys: An optional dictionary of user ID -> master cross-signing
+ key info.
+ - self_signing_key: An optional dictionary of user ID -> self-signing
+ key info.
+ """
device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
"device_keys", {}
)
- res = await self.query_local_devices(device_keys_query)
+ res = await self.query_local_devices(
+ device_keys_query,
+ include_displaynames=(
+ self.config.federation.allow_device_name_lookup_over_federation
+ ),
+ )
ret = {"device_keys": res}
# add in the cross-signing keys
@@ -539,6 +600,8 @@ class E2eKeysHandler:
async def upload_keys_for_user(
self, user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
time_now = self.clock.time_msec()
@@ -666,6 +729,8 @@ class E2eKeysHandler:
user_id: the user uploading the keys
keys: the signing keys
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
# if a master key is uploaded, then check it. Otherwise, load the
# stored master key, to check signatures on other keys
@@ -757,6 +822,9 @@ class E2eKeysHandler:
Raises:
SynapseError: if the signatures dict is not valid.
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
failures = {}
# signatures to be stored. Each item will be a SignatureListItem
@@ -804,7 +872,7 @@ class E2eKeysHandler:
- signatures of the user's master key by the user's devices.
Args:
- user_id (string): the user uploading the keys
+ user_id: the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
@@ -1134,6 +1202,9 @@ class E2eKeysHandler:
A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
If the key cannot be retrieved, all values in the tuple will instead be None.
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
try:
remote_result = await self.federation.query_user_devices(
user.domain, user.to_string()
@@ -1330,11 +1401,14 @@ class SignatureListItem:
class SigningKeyEduUpdater:
"""Handles incoming signing key updates from federation and updates the DB"""
- def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler):
+ def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.clock = hs.get_clock()
- self.e2e_keys_handler = e2e_keys_handler
+
+ device_handler = hs.get_device_handler()
+ assert isinstance(device_handler, DeviceHandler)
+ self._device_handler = device_handler
self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
@@ -1379,9 +1453,6 @@ class SigningKeyEduUpdater:
user_id: the user whose updates we are processing
"""
- device_handler = self.e2e_keys_handler.device_handler
- device_list_updater = device_handler.device_list_updater
-
async with self._remote_edu_linearizer.queue(user_id):
pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates:
@@ -1393,13 +1464,11 @@ class SigningKeyEduUpdater:
logger.info("pending updates: %r", pending_updates)
for master_key, self_signing_key in pending_updates:
- new_device_ids = (
- await device_list_updater.process_cross_signing_key_update(
- user_id,
- master_key,
- self_signing_key,
- )
+ new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update(
+ user_id,
+ master_key,
+ self_signing_key,
)
device_ids = device_ids + new_device_ids
- await device_handler.notify_device_update(user_id, device_ids)
+ await self._device_handler.notify_device_update(user_id, device_ids)
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 28dc08c22a..83f53ceb88 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -377,8 +377,9 @@ class E2eRoomKeysHandler:
"""Deletes a given version of the user's e2e_room_keys backup
Args:
- user_id(str): the user whose current backup version we're deleting
- version(str): the version id of the backup being deleted
+ user_id: the user whose current backup version we're deleting
+            version: Optional. The version ID of the backup version we're deleting.
+ If missing, we delete the current backup version info.
Raises:
NotFoundError: if this backup version doesn't exist
"""
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index a2dd9c7efa..f91dbbecb7 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Collection, List, Optional, Union
+from typing import TYPE_CHECKING, Collection, List, Mapping, Optional, Union
from synapse import event_auth
from synapse.api.constants import (
@@ -29,9 +29,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
-from synapse.events.snapshot import EventContext
from synapse.types import StateMap, get_domain_from_id
-from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -47,17 +45,27 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._store = hs.get_datastores().main
+ self._state_storage_controller = hs.get_storage_controllers().state
self._server_name = hs.hostname
async def check_auth_rules_from_context(
self,
event: EventBase,
- context: EventContext,
+ batched_auth_events: Optional[Mapping[str, EventBase]] = None,
) -> None:
- """Check an event passes the auth rules at its own auth events"""
- await check_state_independent_auth_rules(self._store, event)
+ """Check an event passes the auth rules at its own auth events
+ Args:
+ event: event to be authed
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
+ """
+ await check_state_independent_auth_rules(
+ self._store, event, batched_auth_events
+ )
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
+ if batched_auth_events:
+ auth_events_by_id.update(batched_auth_events)
check_state_dependent_auth_rules(event, auth_events_by_id.values())
def compute_auth_events(
@@ -129,12 +137,9 @@ class EventAuthHandler:
else:
users = {}
- # Find the user with the highest power level.
- users_in_room = await self._store.get_users_in_room(room_id)
- # Only interested in local users.
- local_users_in_room = [
- u for u in users_in_room if get_domain_from_id(u) == self._server_name
- ]
+ # Find the user with the highest power level (only interested in local
+ # users).
+ local_users_in_room = await self._store.get_local_users_in_room(room_id)
chosen_user = max(
local_users_in_room,
key=lambda user: users.get(user, users_default_level),
@@ -159,9 +164,38 @@ class EventAuthHandler:
Codes.UNABLE_TO_GRANT_JOIN,
)
- async def check_host_in_room(self, room_id: str, host: str) -> bool:
- with Measure(self._clock, "check_host_in_room"):
- return await self._store.is_host_joined(room_id, host)
+ async def is_host_in_room(self, room_id: str, host: str) -> bool:
+ return await self._store.is_host_joined(room_id, host)
+
+ async def assert_host_in_room(
+ self, room_id: str, host: str, allow_partial_state_rooms: bool = False
+ ) -> None:
+ """
+ Asserts that the host is in the room, or raises an AuthError.
+
+ If the room is partial-stated, we raise an AuthError with the
+ UNABLE_DUE_TO_PARTIAL_STATE error code, unless `allow_partial_state_rooms` is true.
+
+ If allow_partial_state_rooms is True and the room is partial-stated,
+ this function may return an incorrect result as we are not able to fully
+ track server membership in a room without full state.
+ """
+ if await self._store.is_partial_state_room(room_id):
+ if allow_partial_state_rooms:
+ current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
+ if host not in current_hosts:
+ raise AuthError(403, "Host not in room (partial-state approx).")
+ else:
+ raise AuthError(
+ 403,
+ "Unable to authorise you right now; room is partial-stated here.",
+ errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
+ )
+ else:
+ if not await self.is_host_in_room(room_id, host):
+ raise AuthError(403, "Host not in room.")
async def check_restricted_join_rules(
self,
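The new assert_host_in_room helper lets federation callers enforce membership, including in partially-joined rooms, without repeating those checks themselves. A hedged usage sketch with an invented wrapper name:

    async def reject_if_not_in_room(event_auth_handler, room_id: str, origin: str) -> None:
        # Raises AuthError(403) if `origin` is not joined to the room. With
        # allow_partial_state_rooms=True, a partially-joined room is checked
        # against the current-hosts approximation instead of failing outright
        # with M_UNABLE_DUE_TO_PARTIAL_STATE.
        await event_auth_handler.assert_host_in_room(
            room_id, origin, allow_partial_state_rooms=True
        )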
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index ac13340d3a..949b69cb41 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -151,7 +151,7 @@ class EventHandler:
"""Retrieve a single specified event.
Args:
- user: The user requesting the event
+ user: The local user requesting the event
room_id: The expected room id. We'll return None if the
event's room does not match.
event_id: The event ID to obtain.
@@ -173,8 +173,11 @@ class EventHandler:
if not event:
return None
- users = await self.store.get_users_in_room(event.room_id)
- is_peeking = user.to_string() not in users
+ is_user_in_room = await self.store.check_local_user_in_room(
+ user_id=user.to_string(), room_id=event.room_id
+ )
+ # The user is peeking if they aren't in the room already
+ is_peeking = not is_user_in_room
filtered = await filter_events_for_client(
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 57ad6e5dce..d92582fd5c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -32,18 +32,20 @@ from typing import (
)
import attr
+from prometheus_client import Histogram
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from synapse import event_auth
-from synapse.api.constants import EventContentFields, EventTypes, Membership
+from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
LimitExceededError,
NotFoundError,
@@ -59,6 +61,7 @@ from synapse.events.validator import EventValidator
from synapse.federation.federation_client import InvalidResponseError
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import nested_logging_context
+from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.replication.http.federation import (
@@ -68,7 +71,7 @@ from synapse.replication.http.federation import (
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
-from synapse.types import JsonDict, StateMap, get_domain_from_id
+from synapse.types import JsonDict, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -78,36 +81,28 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-
-def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]:
- """Get joined domains from state
-
- Args:
- state: State map from type/state key to event.
-
- Returns:
- Returns a list of servers with the lowest depth of their joins.
- Sorted by lowest depth first.
- """
- joined_users = [
- (state_key, int(event.depth))
- for (e_type, state_key), event in state.items()
- if e_type == EventTypes.Member and event.membership == Membership.JOIN
- ]
-
- joined_domains: Dict[str, int] = {}
- for u, d in joined_users:
- try:
- dom = get_domain_from_id(u)
- old_d = joined_domains.get(dom)
- if old_d:
- joined_domains[dom] = min(d, old_d)
- else:
- joined_domains[dom] = d
- except Exception:
- pass
-
- return sorted(joined_domains.items(), key=lambda d: d[1])
+# Added to debug performance and track progress on optimizations
+backfill_processing_before_timer = Histogram(
+ "synapse_federation_backfill_processing_before_time_seconds",
+ "sec",
+ [],
+ buckets=(
+ 0.1,
+ 0.5,
+ 1.0,
+ 2.5,
+ 5.0,
+ 7.5,
+ 10.0,
+ 15.0,
+ 20.0,
+ 30.0,
+ 40.0,
+ 60.0,
+ 80.0,
+ "+Inf",
+ ),
+)
class _BackfillPointType(Enum):
@@ -137,6 +132,7 @@ class FederationHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
+ self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
@@ -154,6 +150,8 @@ class FederationHandler:
self.http_client = hs.get_proxied_blacklisted_http_client()
self._replication = hs.get_replication_data_handler()
self._federation_event_handler = hs.get_federation_event_handler()
+ self._device_handler = hs.get_device_handler()
+ self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
@@ -180,6 +178,7 @@ class FederationHandler:
"resume_sync_partial_state_room", self._resume_sync_partial_state_room
)
+ @trace
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
@@ -195,16 +194,52 @@ class FederationHandler:
return. This is used as part of the heuristic to decide if we
should back paginate.
"""
+ # Starting the processing time here so we can include the room backfill
+ # linearizer lock queue in the timing
+ processing_start_time = self.clock.time_msec()
+
async with self._room_backfill.queue(room_id):
- return await self._maybe_backfill_inner(room_id, current_depth, limit)
+ return await self._maybe_backfill_inner(
+ room_id,
+ current_depth,
+ limit,
+ processing_start_time=processing_start_time,
+ )
async def _maybe_backfill_inner(
- self, room_id: str, current_depth: int, limit: int
+ self,
+ room_id: str,
+ current_depth: int,
+ limit: int,
+ *,
+ processing_start_time: Optional[int],
) -> bool:
+ """
+ Checks whether the `current_depth` is at or approaching any backfill
+ points in the room and if so, will backfill. We only care about
+ checking backfill points that happened before the `current_depth`
+ (meaning less than or equal to the `current_depth`).
+
+ Args:
+ room_id: The room to backfill in.
+ current_depth: The depth to check at for any upcoming backfill points.
+ limit: The max number of events to request from the remote federated server.
+ processing_start_time: The time when `maybe_backfill` started processing.
+ Only used for timing. If `None`, no timing observation will be made.
+ """
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
- for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room(
- room_id
+ for event_id, depth in await self.store.get_backfill_points_in_room(
+ room_id=room_id,
+ current_depth=current_depth,
+ # We only need to end up with 5 extremities combined with the
+ # insertion event extremities to make the `/backfill` request
+ # but fetch an order of magnitude more to make sure there is
+ # enough even after we filter them by whether visible in the
+ # history. This isn't fool-proof as all backfill points within
+ # our limit could be filtered out but seems like a good amount
+ # to try with at least.
+ limit=50,
)
]
@@ -213,7 +248,12 @@ class FederationHandler:
insertion_events_to_be_backfilled = [
_BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
- room_id
+ room_id=room_id,
+ current_depth=current_depth,
+ # We only need to end up with 5 extremities combined with
+ # the backfill points to make the `/backfill` request ...
+ # (see the other comment above for more context).
+ limit=50,
)
]
logger.debug(
@@ -222,10 +262,6 @@ class FederationHandler:
insertion_events_to_be_backfilled,
)
- if not backwards_extremities and not insertion_events_to_be_backfilled:
- logger.debug("Not backfilling as no extremeties found.")
- return False
-
# we now have a list of potential places to backpaginate from. We prefer to
# start with the most recent (ie, max depth), so let's sort the list.
sorted_backfill_points: List[_BackfillPoint] = sorted(
@@ -246,6 +282,33 @@ class FederationHandler:
sorted_backfill_points,
)
+ # If we have no backfill points lower than the `current_depth` then we
+ # can either a) bail or b) still attempt to backfill. We opt to try
+ # backfilling anyway just in case we do get relevant events.
+ if not sorted_backfill_points and current_depth != MAX_DEPTH:
+ logger.debug(
+ "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
+ )
+ return await self._maybe_backfill_inner(
+ room_id=room_id,
+ # We use `MAX_DEPTH` so that we find all backfill points next
+ # time (all events are below the `MAX_DEPTH`)
+ current_depth=MAX_DEPTH,
+ limit=limit,
+ # We don't want to start another timing observation from this
+ # nested recursive call. The top-most call can record the time
+ # overall; otherwise the nested call would throw off the results.
+ processing_start_time=None,
+ )
+
+ # Even after recursing with `MAX_DEPTH`, we didn't find any
+ # backward extremities to backfill from.
+ if not sorted_backfill_points:
+ logger.debug(
+ "_maybe_backfill_inner: Not backfilling as no backward extremeties found."
+ )
+ return False
+
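
The control flow above amounts to: look for backfill points at or below the current depth, widen the search once with `MAX_DEPTH` if nothing is found, and only then give up. A reduced, standalone sketch of that shape (hypothetical helper callable; the `MAX_DEPTH` value here is only a stand-in for `synapse.api.constants.MAX_DEPTH`):

from typing import List

MAX_DEPTH = 2**63 - 1  # stand-in for synapse.api.constants.MAX_DEPTH


async def maybe_backfill_sketch(get_backfill_points, current_depth: int) -> bool:
    """Illustrative control flow only: retry once with MAX_DEPTH before bailing."""
    points: List[int] = await get_backfill_points(current_depth)

    if not points and current_depth != MAX_DEPTH:
        # Nothing at or below our depth; widen the search once.
        return await maybe_backfill_sketch(get_backfill_points, MAX_DEPTH)

    if not points:
        # Even the widened search found nothing to backfill from.
        return False

    # ... would go on to pick extremities and make the /backfill request ...
    return True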
# If we're approaching an extremity we trigger a backfill, otherwise we
# no-op.
#
@@ -255,47 +318,16 @@ class FederationHandler:
# chose more than one times the limit in case of failure, but choosing a
# much larger factor will result in triggering a backfill request much
# earlier than necessary.
- #
- # XXX: shouldn't we do this *after* the filter by depth below? Again, we don't
- # care about events that have happened after our current position.
- #
- max_depth = sorted_backfill_points[0].depth
- if current_depth - 2 * limit > max_depth:
+ max_depth_of_backfill_points = sorted_backfill_points[0].depth
+ if current_depth - 2 * limit > max_depth_of_backfill_points:
logger.debug(
"Not backfilling as we don't need to. %d < %d - 2 * %d",
- max_depth,
+ max_depth_of_backfill_points,
current_depth,
limit,
)
return False
- # We ignore extremities that have a greater depth than our current depth
- # as:
- # 1. we don't really care about getting events that have happened
- # after our current position; and
- # 2. we have likely previously tried and failed to backfill from that
- # extremity, so to avoid getting "stuck" requesting the same
- # backfill repeatedly we drop those extremities.
- #
- # However, we need to check that the filtered extremities are non-empty.
- # If they are empty then either we can a) bail or b) still attempt to
- # backfill. We opt to try backfilling anyway just in case we do get
- # relevant events.
- #
- filtered_sorted_backfill_points = [
- t for t in sorted_backfill_points if t.depth <= current_depth
- ]
- if filtered_sorted_backfill_points:
- logger.debug(
- "_maybe_backfill_inner: backfill points before current depth: %s",
- filtered_sorted_backfill_points,
- )
- sorted_backfill_points = filtered_sorted_backfill_points
- else:
- logger.debug(
- "_maybe_backfill_inner: all backfill points are *after* current depth. Backfilling anyway."
- )
-
# For performance's sake, we only want to paginate from a particular extremity
# if we can actually see the events we'll get. Otherwise, we'd just spend a lot
# of resources to get redacted events. We check each extremity in turn and
@@ -347,6 +379,7 @@ class FederationHandler:
filtered_extremities = await filter_events_for_server(
self._storage_controllers,
self.server_name,
+ self.server_name,
events_to_check,
redact=False,
check_history_visibility_only=True,
@@ -368,23 +401,40 @@ class FederationHandler:
logger.debug(
"_maybe_backfill_inner: extremities_to_request %s", extremities_to_request
)
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "extremities_to_request",
+ str(extremities_to_request),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "extremities_to_request.length",
+ str(len(extremities_to_request)),
+ )
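
The tracing tags added here record both the collection and its size under separate keys, so traces remain searchable by count even when the stringified value is large. A small illustrative sketch of that convention, using a plain dict in place of an OpenTracing span and a stand-in for `SynapseTags.RESULT_PREFIX`:

from typing import Any, Dict, Sequence

RESULT_PREFIX = "RESULT."  # stand-in for SynapseTags.RESULT_PREFIX


def tag_collection(span_tags: Dict[str, str], name: str, values: Sequence[Any]) -> None:
    """Record a collection and its length as two separate tags (illustrative only)."""
    span_tags[RESULT_PREFIX + name] = str(list(values))
    span_tags[RESULT_PREFIX + name + ".length"] = str(len(values))


tags: Dict[str, str] = {}
tag_collection(tags, "extremities_to_request", ["$event1", "$event2"])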
# Now we need to decide which hosts to hit first.
-
- # First we try hosts that are already in the room
+ # First we try hosts that are already in the room.
# TODO: HEURISTIC ALERT.
+ likely_domains = (
+ await self._storage_controllers.state.get_current_hosts_in_room_ordered(
+ room_id
+ )
+ )
- curr_state = await self._storage_controllers.state.get_current_state(room_id)
+ async def try_backfill(domains: Collection[str]) -> bool:
+ # TODO: Should we try multiple of these at a time?
- curr_domains = get_domains_from_state(curr_state)
+ # Number of contacted remote homeservers that have denied our backfill
+ # request with a 4xx code.
+ denied_count = 0
- likely_domains = [
- domain for domain, depth in curr_domains if domain != self.server_name
- ]
+ # Maximum number of contacted remote homeservers that can deny our
+ # backfill request with 4xx codes before we give up.
+ max_denied_count = 5
- async def try_backfill(domains: List[str]) -> bool:
- # TODO: Should we try multiple of these at a time?
for dom in domains:
+ # We don't want to ask our own server for information we don't have
+ if dom == self.server_name:
+ continue
+
try:
await self._federation_event_handler.backfill(
dom, room_id, limit=100, extremities=extremities_to_request
@@ -393,36 +443,69 @@ class FederationHandler:
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
+ dom,
+ )
+ continue
except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except HttpResponseException as e:
if 400 <= e.code < 500:
- raise e.to_synapse_error()
+ logger.warning(
+ "Backfill denied from %s because %s [%d/%d]",
+ dom,
+ e,
+ denied_count,
+ max_denied_count,
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+ return False
+ continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except CodeMessageException as e:
if 400 <= e.code < 500:
- raise
+ logger.warning(
+ "Backfill denied from %s because %s [%d/%d]",
+ dom,
+ e,
+ denied_count,
+ max_denied_count,
+ )
+ denied_count += 1
+ if denied_count >= max_denied_count:
+ return False
+ continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
except RequestSendFailed as e:
logger.info("Failed to get backfill from %s because %s", dom, e)
continue
- except FederationDeniedError as e:
- logger.info(e)
- continue
except Exception as e:
logger.exception("Failed to backfill from %s because %s", dom, e)
continue
return False
+ # If we have the `processing_start_time`, then we can make an
+ # observation. We wouldn't have the `processing_start_time` in the case
+ # where `_maybe_backfill_inner` is recursively called to find any
+ # backfill points regardless of `current_depth`.
+ if processing_start_time is not None:
+ processing_end_time = self.clock.time_msec()
+ backfill_processing_before_timer.observe(
+ (processing_end_time - processing_start_time) / 1000
+ )
+
success = await try_backfill(likely_domains)
if success:
return True
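
The reworked `try_backfill` no longer aborts on the first 4xx from a remote; it counts denials and only gives up after a threshold, skipping over other failures. A self-contained sketch of that retry/give-up shape, with a hypothetical fetch callable and error type rather than the real federation client:

from typing import Iterable


class DeniedByRemote(Exception):
    """Stand-in for a 4xx response from a remote server."""


async def try_each_host(hosts: Iterable[str], fetch, max_denied_count: int = 5) -> bool:
    """Try hosts in order; tolerate up to `max_denied_count` denials before giving up."""
    denied_count = 0
    for host in hosts:
        try:
            await fetch(host)
            return True
        except DeniedByRemote:
            denied_count += 1
            if denied_count >= max_denied_count:
                return False
            continue
        except Exception:
            # Any other failure: just move on to the next host.
            continue
    return False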
@@ -549,7 +632,12 @@ class FederationHandler:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
- await self.store.store_partial_state_room(room_id, ret.servers_in_room)
+ await self.store.store_partial_state_room(
+ room_id=room_id,
+ servers=ret.servers_in_room,
+ device_lists_stream_id=self.store.get_device_stream_token(),
+ joined_via=origin,
+ )
try:
max_stream_id = (
@@ -574,6 +662,14 @@ class FederationHandler:
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
+ else:
+ # Record the join event id for future use (when we finish the full
+ # join). We have to do this after persisting the event to keep foreign
+ # key constraints intact.
+ if ret.partial_state:
+ await self.store.write_partial_state_rooms_join_event_id(
+ room_id, event.event_id
+ )
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
@@ -691,15 +787,27 @@ class FederationHandler:
# Send the signed event back to the room, and potentially receive some
# further information about the room in the form of partial state events
- stripped_room_state = await self.federation_client.send_knock(
- target_hosts, event
- )
+ knock_response = await self.federation_client.send_knock(target_hosts, event)
# Store any stripped room state events in the "unsigned" key of the event.
# This is a bit of a hack and is cribbing off of invites. Basically we
# store the room state here and retrieve it again when this event appears
# in the invitee's sync stream. It is stripped out for all other local users.
- event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
+ stripped_room_state = (
+ knock_response.get("knock_room_state")
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also check for 'knock_state_events' to support old instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ or knock_response.get("knock_state_events")
+ )
+
+ if stripped_room_state is None:
+ raise KeyError(
+ "Missing 'knock_room_state' (or legacy 'knock_state_events') field in "
+ "send_knock response"
+ )
+
+ event.unsigned["knock_room_state"] = stripped_room_state
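
The knock-response handling boils down to reading the spec field, falling back to the legacy field name, and failing loudly if neither is present. In isolation (hypothetical helper, simplified types):

from typing import Any, Dict, List


def get_stripped_knock_state(knock_response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Prefer 'knock_room_state', fall back to the legacy 'knock_state_events',
    and raise if both are missing (illustrative only)."""
    stripped = knock_response.get("knock_room_state") or knock_response.get(
        "knock_state_events"
    )
    if stripped is None:
        raise KeyError(
            "Missing 'knock_room_state' (or legacy 'knock_state_events') field"
        )
    return stripped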
context = EventContext.for_outlier(self._storage_controllers)
stream_id = await self._federation_event_handler.persist_events_and_notify(
@@ -752,8 +860,25 @@ class FederationHandler:
# (and return a 404 otherwise)
room_version = await self.store.get_room_version(room_id)
+ if await self.store.is_partial_state_room(room_id):
+ # If our server is still only partially joined, we can't give a complete
+ # response to /make_join, so return a 404 as we would if we weren't in the
+ # room at all.
+ # The main reason we can't respond properly is that we need to know about
+ # the auth events for the join event that we would return.
+ # We also should not bother entertaining the /make_join since we cannot
+ # handle the /send_join.
+ logger.info(
+ "Rejecting /make_join to %s because it's a partial state room", room_id
+ )
+ raise SynapseError(
+ 404,
+ "Unable to handle /make_join right now; this server is not fully joined.",
+ errcode=Codes.NOT_FOUND,
+ )
+
# now check that we are *still* in the room
- is_in_room = await self._event_auth_handler.check_host_in_room(
+ is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -821,7 +946,7 @@ class FederationHandler:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
return event
async def on_invite_request(
@@ -895,9 +1020,17 @@ class FederationHandler:
)
context = EventContext.for_outlier(self._storage_controllers)
- await self._federation_event_handler.persist_events_and_notify(
- event.room_id, [(event, context)]
+
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
+ try:
+ await self._federation_event_handler.persist_events_and_notify(
+ event.room_id, [(event, context)]
+ )
+ except Exception:
+ await self.store.remove_push_actions_from_staging(event.event_id)
+ raise
return event
@@ -996,7 +1129,7 @@ class FederationHandler:
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
raise e
@@ -1055,13 +1188,15 @@ class FederationHandler:
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
raise e
return event
+ @trace
+ @tag_args
async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
"""Returns the state at the event. i.e. not including said event."""
event = await self.store.get_event(event_id, check_room_id=room_id)
@@ -1097,9 +1232,9 @@ class FederationHandler:
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
+ # We allow partially joined rooms since in this case we are filtering out
+ # non-local events in `filter_events_for_server`.
+ await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100)
@@ -1120,7 +1255,7 @@ class FederationHandler:
)
events = await filter_events_for_server(
- self._storage_controllers, origin, events
+ self._storage_controllers, origin, self.server_name, events
)
return events
@@ -1145,21 +1280,17 @@ class FederationHandler:
event_id, allow_none=True, allow_rejected=True
)
- if event:
- in_room = await self._event_auth_handler.check_host_in_room(
- event.room_id, origin
- )
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
- events = await filter_events_for_server(
- self._storage_controllers, origin, [event]
- )
- event = events[0]
- return event
- else:
+ if not event:
return None
+ await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
+
+ events = await filter_events_for_server(
+ self._storage_controllers, origin, self.server_name, [event]
+ )
+ event = events[0]
+ return event
+
async def on_get_missing_events(
self,
origin: str,
@@ -1168,9 +1299,9 @@ class FederationHandler:
latest_events: List[str],
limit: int,
) -> List[EventBase]:
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
+ # We allow partially joined rooms since in this case we are filtering out
+ # non-local events in `filter_events_for_server`.
+ await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
@@ -1183,7 +1314,7 @@ class FederationHandler:
)
missing_events = await filter_events_for_server(
- self._storage_controllers, origin, missing_events
+ self._storage_controllers, origin, self.server_name, missing_events
)
return missing_events
@@ -1204,7 +1335,7 @@ class FederationHandler:
"state_key": target_user_id,
}
- if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
+ if await self._event_auth_handler.is_host_in_room(room_id, self.hs.hostname):
room_version_obj = await self.store.get_room_version(room_id)
builder = self.event_builder_factory.for_room_version(
room_version_obj, event_dict
@@ -1227,9 +1358,7 @@ class FederationHandler:
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(
- event, context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
raise e
@@ -1279,7 +1408,7 @@ class FederationHandler:
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
raise e
@@ -1472,8 +1601,8 @@ class FederationHandler:
Fetch the complexity of a remote room over federation.
Args:
- remote_room_hosts (list[str]): The remote servers to ask.
- room_id (str): The room ID to ask about.
+ remote_room_hosts: The remote servers to ask.
+ room_id: The room ID to ask about.
Returns:
Dict contains the complexity
@@ -1495,13 +1624,13 @@ class FederationHandler:
"""Resumes resyncing of all partial-state rooms after a restart."""
assert not self.config.worker.worker_app
- partial_state_rooms = await self.store.get_partial_state_rooms_and_servers()
- for room_id, servers_in_room in partial_state_rooms.items():
+ partial_state_rooms = await self.store.get_partial_state_room_resync_info()
+ for room_id, resync_info in partial_state_rooms.items():
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
- initial_destination=None,
- other_destinations=servers_in_room,
+ initial_destination=resync_info.joined_via,
+ other_destinations=resync_info.servers_in_room,
room_id=room_id,
)
@@ -1530,28 +1659,12 @@ class FederationHandler:
# really leave, that might mean we have difficulty getting the room state over
# federation.
# https://github.com/matrix-org/synapse/issues/12802
- #
- # TODO(faster_joins): we need some way of prioritising which homeservers in
- # `other_destinations` to try first, otherwise we'll spend ages trying dead
- # homeservers for large rooms.
- # https://github.com/matrix-org/synapse/issues/12999
-
- if initial_destination is None and len(other_destinations) == 0:
- raise ValueError(
- f"Cannot resync state of {room_id}: no destinations provided"
- )
# Make an infinite iterator of destinations to try. Once we find a working
# destination, we'll stick with it until it flakes.
- destinations: Collection[str]
- if initial_destination is not None:
- # Move `initial_destination` to the front of the list.
- destinations = list(other_destinations)
- if initial_destination in destinations:
- destinations.remove(initial_destination)
- destinations = [initial_destination] + destinations
- else:
- destinations = other_destinations
+ destinations = _prioritise_destinations_for_partial_state_resync(
+ initial_destination, other_destinations, room_id
+ )
destination_iter = itertools.cycle(destinations)
# `destination` is the current remote homeserver we're pulling from.
@@ -1569,6 +1682,9 @@ class FederationHandler:
# https://github.com/matrix-org/synapse/issues/12994
await self.state_handler.update_current_state(room_id)
+ logger.info("Handling any pending device list updates")
+ await self._device_handler.handle_room_un_partial_stated(room_id)
+
logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id)
if success:
@@ -1598,7 +1714,22 @@ class FederationHandler:
destination, event
)
break
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_sync_partial_state_room: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one backed-off event upstream.
except FederationError as e:
+ # TODO: We should `record_event_failed_pull_attempt` here,
+ # see https://github.com/matrix-org/synapse/issues/13700
+
if attempt == len(destinations) - 1:
# We have tried every remote server for this event. Give up.
# TODO(faster_joins) giving up isn't the right thing to do
@@ -1631,3 +1762,29 @@ class FederationHandler:
room_id,
destination,
)
+
+
+def _prioritise_destinations_for_partial_state_resync(
+ initial_destination: Optional[str],
+ other_destinations: Collection[str],
+ room_id: str,
+) -> Collection[str]:
+ """Work out the order in which we should ask servers to resync events.
+
+ If an `initial_destination` is given, it takes top priority. Otherwise
+ all servers are treated equally.
+
+ :raises ValueError: if no destination is provided at all.
+ """
+ if initial_destination is None and len(other_destinations) == 0:
+ raise ValueError(f"Cannot resync state of {room_id}: no destinations provided")
+
+ if initial_destination is None:
+ return other_destinations
+
+ # Move `initial_destination` to the front of the list.
+ destinations = list(other_destinations)
+ if initial_destination in destinations:
+ destinations.remove(initial_destination)
+ destinations = [initial_destination] + destinations
+ return destinations
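
The helper factored out above pairs with the `itertools.cycle` call that consumes it: the server we joined via is tried first, then the remaining servers are rotated through indefinitely. A small usage sketch with hypothetical server names:

import itertools
from typing import Collection, List, Optional


def prioritise(initial: Optional[str], others: Collection[str]) -> List[str]:
    """Illustrative re-statement of the prioritisation helper."""
    if initial is None and not others:
        raise ValueError("no destinations provided")
    if initial is None:
        return list(others)
    rest = [d for d in others if d != initial]
    return [initial] + rest


destinations = prioritise("first.example.org", ["a.example.org", "b.example.org"])
destination_iter = itertools.cycle(destinations)
# next(destination_iter) -> "first.example.org", then "a...", "b...", and around again.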
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 91d1439191..f7223b03c3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -29,7 +29,7 @@ from typing import (
Tuple,
)
-from prometheus_client import Counter
+from prometheus_client import Counter, Histogram
from synapse import event_auth
from synapse.api.constants import (
@@ -44,6 +44,7 @@ from synapse.api.errors import (
AuthError,
Codes,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
RequestSendFailed,
SynapseError,
@@ -57,8 +58,15 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.federation.federation_client import InvalidResponseError
+from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
+from synapse.logging.opentracing import (
+ SynapseTags,
+ set_tag,
+ start_active_span,
+ tag_args,
+ trace,
+)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.replication.http.federation import (
@@ -91,6 +99,36 @@ soft_failed_event_counter = Counter(
"Events received over federation that we marked as soft_failed",
)
+# Added to debug performance and track progress on optimizations
+backfill_processing_after_timer = Histogram(
+ "synapse_federation_backfill_processing_after_time_seconds",
+ "sec",
+ [],
+ buckets=(
+ 0.1,
+ 0.25,
+ 0.5,
+ 1.0,
+ 2.5,
+ 5.0,
+ 7.5,
+ 10.0,
+ 15.0,
+ 20.0,
+ 25.0,
+ 30.0,
+ 40.0,
+ 50.0,
+ 60.0,
+ 80.0,
+ 100.0,
+ 120.0,
+ 150.0,
+ 180.0,
+ "+Inf",
+ ),
+)
+
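
Unlike the "before" timer, which is fed an explicit start/end difference, this "after" timer is consumed further down via the histogram's context-manager form. A minimal sketch of that usage, again with a hypothetical metric name and assuming prometheus_client is available:

from prometheus_client import Histogram

# Hypothetical metric mirroring how backfill_processing_after_timer is used below.
demo_after_timer = Histogram(
    "demo_after_time_seconds",
    "sec",
    buckets=(0.5, 5.0, 60.0, float("inf")),
)


def process_batch(events: list) -> None:
    # The context manager observes the elapsed wall-clock time on exit.
    with demo_after_timer.time():
        for _event in events:
            pass  # ... per-event processing would go here ...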
class FederationEventHandler:
"""Handles events that originated from federation.
@@ -201,7 +239,7 @@ class FederationEventHandler:
#
# Note that if we were never in the room then we would have already
# dropped the event, since we wouldn't know the room version.
- is_in_room = await self._event_auth_handler.check_host_in_room(
+ is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self._server_name
)
if not is_in_room:
@@ -377,7 +415,9 @@ class FederationEventHandler:
# First, precalculate the joined hosts so that the federation sender doesn't
# need to.
- await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+ await self._event_creation_handler.cache_joined_hosts_for_events(
+ [(event, context)]
+ )
await self._check_for_soft_fail(event, context=context, origin=origin)
await self._run_push_actions_and_persist_event(event, context)
@@ -409,6 +449,7 @@ class FederationEventHandler:
prev_member_event,
)
+ @trace
async def process_remote_join(
self,
origin: str,
@@ -527,6 +568,9 @@ class FederationEventHandler:
event: partial-state event to be de-partial-stated
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to request state from the remote server.
"""
logger.info("Updating state for %s", event.event_id)
@@ -566,6 +610,7 @@ class FederationEventHandler:
event.event_id
)
+ @trace
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None:
@@ -595,21 +640,23 @@ class FederationEventHandler:
if not events:
return
- # if there are any events in the wrong room, the remote server is buggy and
- # should not be trusted.
- for ev in events:
- if ev.room_id != room_id:
- raise InvalidResponseError(
- f"Remote server {dest} returned event {ev.event_id} which is in "
- f"room {ev.room_id}, when we were backfilling in {room_id}"
- )
+ with backfill_processing_after_timer.time():
+ # if there are any events in the wrong room, the remote server is buggy and
+ # should not be trusted.
+ for ev in events:
+ if ev.room_id != room_id:
+ raise InvalidResponseError(
+ f"Remote server {dest} returned event {ev.event_id} which is in "
+ f"room {ev.room_id}, when we were backfilling in {room_id}"
+ )
- await self._process_pulled_events(
- dest,
- events,
- backfilled=True,
- )
+ await self._process_pulled_events(
+ dest,
+ events,
+ backfilled=True,
+ )
+ @trace
async def _get_missing_events_for_pdu(
self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
) -> None:
@@ -710,8 +757,9 @@ class FederationEventHandler:
logger.info("Got %d prev_events", len(missing_events))
await self._process_pulled_events(origin, missing_events, backfilled=False)
+ @trace
async def _process_pulled_events(
- self, origin: str, events: Iterable[EventBase], backfilled: bool
+ self, origin: str, events: Collection[EventBase], backfilled: bool
) -> None:
"""Process a batch of events we have pulled from a remote server
@@ -726,6 +774,15 @@ class FederationEventHandler:
backfilled: True if this is part of a historical batch of events (inhibits
notification to clients, and validation of device keys.)
"""
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+ str([event.event_id for event in events]),
+ )
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+ str(len(events)),
+ )
+ set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
logger.debug(
"processing pulled backfilled=%s events=%s",
backfilled,
@@ -741,13 +798,48 @@ class FederationEventHandler:
],
)
+ # Check if we already have any of these events.
+ # Note: we currently make a lookup in the database directly here rather than
+ # checking the event cache, due to:
+ # https://github.com/matrix-org/synapse/issues/13476
+ existing_events_map = await self._store._get_events_from_db(
+ [event.event_id for event in events]
+ )
+
+ new_events = []
+ for event in events:
+ event_id = event.event_id
+
+ # If we've already seen this event ID...
+ if event_id in existing_events_map:
+ existing_event = existing_events_map[event_id]
+
+ # ...and the event itself was not previously stored as an outlier...
+ if not existing_event.event.internal_metadata.is_outlier():
+ # ...then there's no need to persist it. We have it already.
+ logger.info(
+ "_process_pulled_event: Ignoring received event %s which we "
+ "have already seen",
+ event.event_id,
+ )
+ continue
+
+ # While we have seen this event before, it was stored as an outlier.
+ # We'll now persist it as a non-outlier.
+ logger.info("De-outliering event %s", event_id)
+
+ # Continue on with the events that are new to us.
+ new_events.append(event)
+
# We want to sort these by depth so we process them and
# tell clients about them in order.
- sorted_events = sorted(events, key=lambda x: x.depth)
+ sorted_events = sorted(new_events, key=lambda x: x.depth)
for ev in sorted_events:
with nested_logging_context(ev.event_id):
await self._process_pulled_event(origin, ev, backfilled=backfilled)
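
The new pre-filter can be summarised as: drop pulled events we have already fully persisted, keep those we only have as outliers (so they can be de-outliered), then process the rest oldest-first. A self-contained sketch with simplified event objects, not the real `EventBase`:

from dataclasses import dataclass
from typing import Dict, List


@dataclass
class PulledEvent:
    event_id: str
    depth: int
    is_outlier: bool = False


def filter_and_sort(
    pulled: List[PulledEvent], existing: Dict[str, PulledEvent]
) -> List[PulledEvent]:
    """Keep events that are new to us or only stored as outliers,
    sorted by depth (oldest first)."""
    new_events = []
    for event in pulled:
        seen = existing.get(event.event_id)
        if seen is not None and not seen.is_outlier:
            continue  # already fully persisted, nothing to do
        new_events.append(event)
    return sorted(new_events, key=lambda e: e.depth)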
+ @trace
+ @tag_args
async def _process_pulled_event(
self, origin: str, event: EventBase, backfilled: bool
) -> None:
@@ -793,22 +885,13 @@ class FederationEventHandler:
event_id = event.event_id
- existing = await self._store.get_event(
- event_id, allow_none=True, allow_rejected=True
- )
- if existing:
- if not existing.internal_metadata.is_outlier():
- logger.info(
- "_process_pulled_event: Ignoring received event %s which we have already seen",
- event_id,
- )
- return
- logger.info("De-outliering event %s", event_id)
-
try:
self._sanity_check_event(event)
except SynapseError as err:
logger.warning("Event %s failed sanity check: %s", event_id, err)
+ await self._store.record_event_failed_pull_attempt(
+ event.room_id, event_id, str(err)
+ )
return
try:
@@ -843,12 +926,29 @@ class FederationEventHandler:
context,
backfilled=backfilled,
)
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_process_pulled_event: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one backed-off event upstream.
except FederationError as e:
+ await self._store.record_event_failed_pull_attempt(
+ event.room_id, event_id, str(e)
+ )
+
if e.code == 403:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
+ @trace
async def _compute_event_context_with_maybe_missing_prevs(
self, dest: str, event: EventBase
) -> EventContext:
@@ -884,6 +984,9 @@ class FederationEventHandler:
The event context.
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to get the state from the remote server after any
missing `prev_event`s.
"""
@@ -894,6 +997,18 @@ class FederationEventHandler:
seen = await self._store.have_events_in_timeline(prevs)
missing_prevs = prevs - seen
+ # If we've already recently attempted to pull this missing event, don't
+ # try it again so soon. Since we have to fetch all of the prev_events, we can
+ # bail early here if we find any to ignore.
+ prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
+ room_id, missing_prevs
+ )
+ if len(prevs_to_ignore) > 0:
+ raise FederationPullAttemptBackoffError(
+ event_ids=prevs_to_ignore,
+ message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
+ )
+
if not missing_prevs:
return await self._state_handler.compute_event_context(event)
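
The new guard reads as: before fetching any missing prev_events, ask the store which of them recently failed to pull, and bail with a dedicated error if any are still in backoff. A reduced sketch with a hypothetical store lookup and error type standing in for the real ones:

from typing import Collection, List


class PullAttemptBackoffError(Exception):
    """Raised when we are deliberately not re-fetching recently failed events."""


async def check_backoff(get_ids_in_backoff, missing_prevs: Collection[str]) -> None:
    """Raise if any of the missing prev_events are still in backoff."""
    ignored: List[str] = await get_ids_in_backoff(missing_prevs)
    if ignored:
        raise PullAttemptBackoffError(
            f"not pulling {ignored[0]}: recently failed, backing off"
        )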
@@ -950,10 +1065,9 @@ class FederationEventHandler:
state_res_store=StateResolutionStore(self._store),
)
- except Exception:
+ except Exception as e:
logger.warning(
- "Error attempting to resolve state at missing prev_events",
- exc_info=True,
+ "Error attempting to resolve state at missing prev_events: %s", e
)
raise FederationError(
"ERROR",
@@ -965,6 +1079,8 @@ class FederationEventHandler:
event, state_ids_before_event=state_map, partial_state=partial_state
)
+ @trace
+ @tag_args
async def _get_state_ids_after_missing_prev_event(
self,
destination: str,
@@ -985,6 +1101,14 @@ class FederationEventHandler:
InvalidResponseError: if the remote homeserver's response contains fields
of the wrong type.
"""
+
+ # It would be better if we could query the difference from our known
+ # state to the given `event_id` so the sending server doesn't have to
+ # send as much and we don't have to process as many events. For example
+ # in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k
+ # auth_events) from this call.
+ #
+ # Tracked by https://github.com/matrix-org/synapse/issues/13618
(
state_event_ids,
auth_event_ids,
@@ -1004,10 +1128,10 @@ class FederationEventHandler:
logger.debug("Fetching %i events from cache/store", len(desired_events))
have_events = await self._store.have_seen_events(room_id, desired_events)
- missing_desired_events = desired_events - have_events
+ missing_desired_event_ids = desired_events - have_events
logger.debug(
"We are missing %i events (got %i)",
- len(missing_desired_events),
+ len(missing_desired_event_ids),
len(have_events),
)
@@ -1019,13 +1143,30 @@ class FederationEventHandler:
# already have a bunch of the state events. It would be nice if the
# federation api gave us a way of finding out which we actually need.
- missing_auth_events = set(auth_event_ids) - have_events
- missing_auth_events.difference_update(
- await self._store.have_seen_events(room_id, missing_auth_events)
+ missing_auth_event_ids = set(auth_event_ids) - have_events
+ missing_auth_event_ids.difference_update(
+ await self._store.have_seen_events(room_id, missing_auth_event_ids)
)
- logger.debug("We are also missing %i auth events", len(missing_auth_events))
+ logger.debug("We are also missing %i auth events", len(missing_auth_event_ids))
- missing_events = missing_desired_events | missing_auth_events
+ missing_event_ids = missing_desired_event_ids | missing_auth_event_ids
+
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "missing_auth_event_ids",
+ str(missing_auth_event_ids),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length",
+ str(len(missing_auth_event_ids)),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "missing_desired_event_ids",
+ str(missing_desired_event_ids),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length",
+ str(len(missing_desired_event_ids)),
+ )
# Making an individual request for each of 1000s of events has a lot of
# overhead. On the other hand, we don't really want to fetch all of the events
@@ -1036,13 +1177,13 @@ class FederationEventHandler:
#
# TODO: might it be better to have an API which lets us do an aggregate event
# request
- if (len(missing_events) * 10) >= len(auth_event_ids) + len(state_event_ids):
+ if (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids):
logger.debug("Requesting complete state from remote")
await self._get_state_and_persist(destination, room_id, event_id)
else:
- logger.debug("Fetching %i events from remote", len(missing_events))
+ logger.debug("Fetching %i events from remote", len(missing_event_ids))
await self._get_events_and_persist(
- destination=destination, room_id=room_id, event_ids=missing_events
+ destination=destination, room_id=room_id, event_ids=missing_event_ids
)
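
The unchanged heuristic, now with the clearer `*_event_ids` names, is a single comparison: if the missing set is at least a tenth of everything the remote listed, one bulk state request is cheaper than many per-event requests. In sketch form, with hypothetical callables standing in for the two fetch paths:

from typing import Collection


async def fetch_missing(
    missing_event_ids: Collection[str],
    state_event_ids: Collection[str],
    auth_event_ids: Collection[str],
    get_full_state,         # async callable: () -> None
    get_individual_events,  # async callable: (Collection[str]) -> None
) -> None:
    if len(missing_event_ids) * 10 >= len(auth_event_ids) + len(state_event_ids):
        # Missing a large fraction: one bulk state request is cheaper.
        await get_full_state()
    else:
        await get_individual_events(missing_event_ids)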
# We now need to fill out the state map, which involves fetching the
@@ -1099,6 +1240,14 @@ class FederationEventHandler:
event_id,
failed_to_fetch,
)
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "failed_to_fetch",
+ str(failed_to_fetch),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "failed_to_fetch.length",
+ str(len(failed_to_fetch)),
+ )
if remote_event.is_state() and remote_event.rejected_reason is None:
state_map[
@@ -1107,6 +1256,8 @@ class FederationEventHandler:
return state_map
+ @trace
+ @tag_args
async def _get_state_and_persist(
self, destination: str, room_id: str, event_id: str
) -> None:
@@ -1128,6 +1279,7 @@ class FederationEventHandler:
destination=destination, room_id=room_id, event_ids=(event_id,)
)
+ @trace
async def _process_received_pdu(
self,
origin: str,
@@ -1278,6 +1430,7 @@ class FederationEventHandler:
except Exception:
logger.exception("Failed to resync device for %s", sender)
+ @trace
async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
"""Handles backfilling the insertion event when we receive a marker
event that points to one.
@@ -1309,7 +1462,7 @@ class FederationEventHandler:
logger.debug("_handle_marker_event: received %s", marker_event)
insertion_event_id = marker_event.content.get(
- EventContentFields.MSC2716_MARKER_INSERTION
+ EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE
)
if insertion_event_id is None:
@@ -1363,8 +1516,8 @@ class FederationEventHandler:
)
async def backfill_event_id(
- self, destination: str, room_id: str, event_id: str
- ) -> EventBase:
+ self, destinations: List[str], room_id: str, event_id: str
+ ) -> PulledPduInfo:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
@@ -1376,24 +1529,21 @@ class FederationEventHandler:
Raises:
FederationError if we are unable to find the event from the destination
"""
- logger.info(
- "backfill_event_id: event_id=%s from destination=%s", event_id, destination
- )
+ logger.info("backfill_event_id: event_id=%s", event_id)
room_version = await self._store.get_room_version(room_id)
- event_from_response = await self._federation_client.get_pdu(
- [destination],
+ pulled_pdu_info = await self._federation_client.get_pdu(
+ destinations,
event_id,
room_version,
)
- if not event_from_response:
+ if not pulled_pdu_info:
raise FederationError(
"ERROR",
404,
- "Unable to find event_id=%s from destination=%s to backfill."
- % (event_id, destination),
+ f"Unable to find event_id={event_id} from remote servers to backfill.",
affected=event_id,
)
@@ -1401,14 +1551,16 @@ class FederationEventHandler:
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
- destination,
- [event_from_response],
+ pulled_pdu_info.pull_origin,
+ [pulled_pdu_info.pdu],
# Prevent notifications going to clients
backfilled=True,
)
- return event_from_response
+ return pulled_pdu_info
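
`backfill_event_id` now takes a list of candidate servers and returns both the PDU and the server it was actually pulled from, so callers can keep working against that origin. A sketch of that shape with a simplified stand-in for `PulledPduInfo`:

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class PulledPduInfoSketch:
    """Simplified stand-in for PulledPduInfo: the event plus where it came from."""
    pdu: dict
    pull_origin: str


async def get_pdu_from_any(
    destinations: List[str], event_id: str, fetch
) -> Optional[PulledPduInfoSketch]:
    """Try each destination in turn and remember which one answered."""
    for destination in destinations:
        pdu = await fetch(destination, event_id)
        if pdu is not None:
            return PulledPduInfoSketch(pdu=pdu, pull_origin=destination)
    return None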
+ @trace
+ @tag_args
async def _get_events_and_persist(
self, destination: str, room_id: str, event_ids: Collection[str]
) -> None:
@@ -1428,19 +1580,19 @@ class FederationEventHandler:
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
try:
- event = await self._federation_client.get_pdu(
+ pulled_pdu_info = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
- if event is None:
+ if pulled_pdu_info is None:
logger.warning(
"Server %s didn't return event %s",
destination,
event_id,
)
return
- events.append(event)
+ events.append(pulled_pdu_info.pdu)
except Exception as e:
logger.warning(
@@ -1454,6 +1606,7 @@ class FederationEventHandler:
logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
await self._auth_and_persist_outliers(room_id, events)
+ @trace
async def _auth_and_persist_outliers(
self, room_id: str, events: Iterable[EventBase]
) -> None:
@@ -1472,6 +1625,16 @@ class FederationEventHandler:
"""
event_map = {event.event_id: event for event in events}
+ event_ids = event_map.keys()
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+ str(event_ids),
+ )
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+ str(len(event_ids)),
+ )
+
# filter out any events we have already seen. This might happen because
# the events were eagerly pushed to us (eg, during a room join), or because
# another thread has raced against us since we decided to request the event.
@@ -1588,6 +1751,7 @@ class FederationEventHandler:
backfilled=True,
)
+ @trace
async def _check_event_auth(
self, origin: Optional[str], event: EventBase, context: EventContext
) -> None:
@@ -1626,6 +1790,14 @@ class FederationEventHandler:
claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
origin, event
)
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "claimed_auth_events",
+ str([ev.event_id for ev in claimed_auth_events]),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "claimed_auth_events.length",
+ str(len(claimed_auth_events)),
+ )
# ... and check that the event passes auth at those auth events.
# https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
@@ -1723,6 +1895,7 @@ class FederationEventHandler:
)
context.rejected = RejectedReason.AUTH_ERROR
+ @trace
async def _maybe_kick_guest_users(self, event: EventBase) -> None:
if event.type != EventTypes.GuestAccess:
return
@@ -1930,6 +2103,8 @@ class FederationEventHandler:
# instead we raise an AuthError, which will make the caller ignore it.
raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")
+ @trace
+ @tag_args
async def _get_remote_auth_chain_for_event(
self, destination: str, room_id: str, event_id: str
) -> None:
@@ -1958,6 +2133,7 @@ class FederationEventHandler:
await self._auth_and_persist_outliers(room_id, remote_auth_events)
+ @trace
async def _run_push_actions_and_persist_event(
self, event: EventBase, context: EventContext, backfilled: bool = False
) -> None:
@@ -1991,8 +2167,8 @@ class FederationEventHandler:
min_depth,
)
else:
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
try:
@@ -2034,6 +2210,7 @@ class FederationEventHandler:
if instance != self._instance_name:
# Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)`
+ result = {}
try:
for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events(
@@ -2066,8 +2243,17 @@ class FederationEventHandler:
self._message_handler.maybe_schedule_expiry(event)
if not backfilled: # Never notify for backfilled events
- for event in events:
- await self._notify_persisted_event(event, max_stream_token)
+ with start_active_span("notify_persisted_events"):
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "event_ids",
+ str([ev.event_id for ev in events]),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "event_ids.length",
+ str(len(events)),
+ )
+ for event in events:
+ await self._notify_persisted_event(event, max_stream_token)
return max_stream_token.stream
@@ -2104,8 +2290,8 @@ class FederationEventHandler:
event_pos = PersistedEventPosition(
self._instance_name, event.internal_metadata.stream_ordering
)
- await self._notifier.on_new_room_event(
- event, event_pos, max_stream_token, extra_users=extra_users
+ await self._notifier.on_new_room_events(
+ [(event, event_pos)], max_stream_token, extra_users=extra_users
)
if event.type == EventTypes.Member and event.membership == Membership.JOIN:
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 61b9622697..b98c7b3b40 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -26,7 +26,6 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http import RequestTimedOutError
from synapse.http.client import SimpleHttpClient
from synapse.http.site import SynapseRequest
@@ -434,48 +433,6 @@ class IdentityHandler:
return session_id
- async def request_email_token(
- self,
- id_server: str,
- email: str,
- client_secret: str,
- send_attempt: int,
- next_link: Optional[str] = None,
- ) -> JsonDict:
- """
- Request an external server send an email on our behalf for the purposes of threepid
- validation.
-
- Args:
- id_server: The identity server to proxy to
- email: The email to send the message to
- client_secret: The unique client_secret sends by the user
- send_attempt: Which attempt this is
- next_link: A link to redirect the user to once they submit the token
-
- Returns:
- The json response body from the server
- """
- params = {
- "email": email,
- "client_secret": client_secret,
- "send_attempt": send_attempt,
- }
- if next_link:
- params["next_link"] = next_link
-
- try:
- data = await self.http_client.post_json_get_json(
- id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
- params,
- )
- return data
- except HttpResponseException as e:
- logger.info("Proxied requestToken failed: %r", e)
- raise e.to_synapse_error()
- except RequestTimedOutError:
- raise SynapseError(500, "Timed out contacting identity server")
-
async def requestMsisdnToken(
self,
id_server: str,
@@ -549,18 +506,7 @@ class IdentityHandler:
validation_session = None
# Try to validate as email
- if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- # Remote emails will only be used if a valid identity server is provided.
- assert (
- self.hs.config.registration.account_threepid_delegate_email is not None
- )
-
- # Ask our delegated email identity server
- validation_session = await self.threepid_from_creds(
- self.hs.config.registration.account_threepid_delegate_email,
- threepid_creds,
- )
- elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.hs.config.email.can_verify_email:
# Get a validated session matching these details
validation_session = await self.store.get_threepid_validation_session(
"email", client_secret, sid=sid, validated=True
@@ -610,11 +556,7 @@ class IdentityHandler:
raise SynapseError(400, "Error contacting the identity server")
async def lookup_3pid(
- self,
- id_server: str,
- medium: str,
- address: str,
- id_access_token: Optional[str] = None,
+ self, id_server: str, medium: str, address: str, id_access_token: str
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server.
@@ -629,60 +571,15 @@ class IdentityHandler:
Returns:
the matrix ID of the 3pid, or None if it is not recognized.
"""
- if id_access_token is not None:
- try:
- results = await self._lookup_3pid_v2(
- id_server, id_access_token, medium, address
- )
- return results
-
- except Exception as e:
- # Catch HttpResponseExcept for a non-200 response code
- # Check if this identity server does not know about v2 lookups
- if isinstance(e, HttpResponseException) and e.code == 404:
- # This is an old identity server that does not yet support v2 lookups
- logger.warning(
- "Attempted v2 lookup on v1 identity server %s. Falling "
- "back to v1",
- id_server,
- )
- else:
- logger.warning("Error when looking up hashing details: %s", e)
- return None
-
- return await self._lookup_3pid_v1(id_server, medium, address)
-
- async def _lookup_3pid_v1(
- self, id_server: str, medium: str, address: str
- ) -> Optional[str]:
- """Looks up a 3pid in the passed identity server using v1 lookup.
- Args:
- id_server: The server name (including port, if required)
- of the identity server to use.
- medium: The type of the third party identifier (e.g. "email").
- address: The third party identifier (e.g. "foo@example.com").
-
- Returns:
- the matrix ID of the 3pid, or None if it is not recognized.
- """
try:
- data = await self.blacklisting_http_client.get_json(
- "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
- {"medium": medium, "address": address},
+ results = await self._lookup_3pid_v2(
+ id_server, id_access_token, medium, address
)
-
- if "mxid" in data:
- # note: we used to verify the identity server's signature here, but no longer
- # require or validate it. See the following for context:
- # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
- return data["mxid"]
- except RequestTimedOutError:
- raise SynapseError(500, "Timed out contacting identity server")
- except OSError as e:
- logger.warning("Error from v1 identity server lookup: %s" % (e,))
-
- return None
+ return results
+ except Exception as e:
+ logger.warning("Error when looking up hashing details: %s", e)
+ return None
async def _lookup_3pid_v2(
self, id_server: str, id_access_token: str, medium: str, address: str
@@ -811,7 +708,7 @@ class IdentityHandler:
room_type: Optional[str],
inviter_display_name: str,
inviter_avatar_url: str,
- id_access_token: Optional[str] = None,
+ id_access_token: str,
) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
"""
Asks an identity server for a third party invite.
@@ -832,7 +729,7 @@ class IdentityHandler:
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
- id_access_token (str|None): The access token to authenticate to the identity
+ id_access_token: The access token to authenticate to the identity
server with
Returns:
@@ -864,71 +761,24 @@ class IdentityHandler:
invite_config["org.matrix.web_client_location"] = self._web_client_location
# Add the identity service access token to the JSON body and use the v2
- # Identity Service endpoints if id_access_token is present
+ # Identity Service endpoints
data = None
- base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)
- if id_access_token:
- key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
- id_server_scheme,
- id_server,
- )
-
- # Attempt a v2 lookup
- url = base_url + "/v2/store-invite"
- try:
- data = await self.blacklisting_http_client.post_json_get_json(
- url,
- invite_config,
- {"Authorization": create_id_access_token_header(id_access_token)},
- )
- except RequestTimedOutError:
- raise SynapseError(500, "Timed out contacting identity server")
- except HttpResponseException as e:
- if e.code != 404:
- logger.info("Failed to POST %s with JSON: %s", url, e)
- raise e
+ key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
+ id_server_scheme,
+ id_server,
+ )
- if data is None:
- key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
- id_server_scheme,
- id_server,
+ url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server)
+ try:
+ data = await self.blacklisting_http_client.post_json_get_json(
+ url,
+ invite_config,
+ {"Authorization": create_id_access_token_header(id_access_token)},
)
- url = base_url + "/api/v1/store-invite"
-
- try:
- data = await self.blacklisting_http_client.post_json_get_json(
- url, invite_config
- )
- except RequestTimedOutError:
- raise SynapseError(500, "Timed out contacting identity server")
- except HttpResponseException as e:
- logger.warning(
- "Error trying to call /store-invite on %s%s: %s",
- id_server_scheme,
- id_server,
- e,
- )
-
- if data is None:
- # Some identity servers may only support application/x-www-form-urlencoded
- # types. This is especially true with old instances of Sydent, see
- # https://github.com/matrix-org/sydent/pull/170
- try:
- data = await self.blacklisting_http_client.post_urlencoded_get_json(
- url, invite_config
- )
- except HttpResponseException as e:
- logger.warning(
- "Error calling /store-invite on %s%s with fallback "
- "encoding: %s",
- id_server_scheme,
- id_server,
- e,
- )
- raise e
+ except RequestTimedOutError:
+ raise SynapseError(500, "Timed out contacting identity server")
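
With the v1 fallbacks removed, the identity-server call collapses to a single POST to the v2 store-invite endpoint with the access token in an Authorization header. A reduced sketch that only assembles the request (hypothetical helper; the real code sends it through Synapse's blacklisting HTTP client and `create_id_access_token_header`):

from typing import Any, Dict


def build_store_invite_request(
    id_server: str, id_access_token: str, invite_config: Dict[str, Any]
) -> Dict[str, Any]:
    """Assemble the v2 store-invite request (illustrative only)."""
    return {
        "url": f"https://{id_server}/_matrix/identity/v2/store-invite",
        "headers": {"Authorization": f"Bearer {id_access_token}"},
        "json": invite_config,
    }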
- # TODO: Check for success
token = data["token"]
public_keys = data.get("public_keys", [])
if "public_key" in data:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 85b472f250..9c335e6863 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -57,13 +57,7 @@ class InitialSyncHandler:
self.validator = EventValidator()
self.snapshot_cache: ResponseCache[
Tuple[
- str,
- Optional[StreamToken],
- Optional[StreamToken],
- str,
- Optional[int],
- bool,
- bool,
+ str, Optional[StreamToken], Optional[StreamToken], str, int, bool, bool
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
@@ -143,8 +137,8 @@ class InitialSyncHandler:
joined_rooms,
to_key=int(now_token.receipt_key),
)
- if self.hs.config.experimental.msc2285_enabled:
- receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id)
+
+ receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id)
tags_by_room = await self.store.get_tags_for_user(user_id)
@@ -154,11 +148,6 @@ class InitialSyncHandler:
public_room_ids = await self.store.get_public_room_ids()
- if pagin_config.limit is not None:
- limit = pagin_config.limit
- else:
- limit = 10
-
serializer_options = SerializeEventConfig(as_client_event=as_client_event)
async def handle_room(event: RoomsForUser) -> None:
@@ -210,7 +199,7 @@ class InitialSyncHandler:
run_in_background(
self.store.get_recent_events_for_room,
event.room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=room_end_token,
),
deferred_room_state,
@@ -309,18 +298,18 @@ class InitialSyncHandler:
if blocked:
raise SynapseError(403, "This room has been blocked on this server")
- user_id = requester.user.to_string()
-
(
membership,
member_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
room_id,
- user_id,
+ requester,
allow_departed_users=True,
)
is_peeking = member_event_id is None
+ user_id = requester.user.to_string()
+
if membership == Membership.JOIN:
result = await self._room_initial_sync_joined(
user_id, room_id, pagin_config, membership, is_peeking
@@ -360,15 +349,11 @@ class InitialSyncHandler:
member_event_id
)
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
leave_position = await self.store.get_position_for_event(member_event_id)
stream_token = leave_position.to_room_stream_token()
messages, token = await self.store.get_recent_events_for_room(
- room_id, limit=limit, end_token=stream_token
+ room_id, limit=pagin_config.limit, end_token=stream_token
)
messages = await filter_events_for_client(
@@ -420,10 +405,6 @@ class InitialSyncHandler:
now_token = self.hs.get_event_sources().get_current_token()
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
room_members = [
m
for m in current_state.values()
@@ -456,11 +437,8 @@ class InitialSyncHandler:
)
if not receipts:
return []
- if self.hs.config.experimental.msc2285_enabled:
- receipts = ReceiptEventSource.filter_out_private_receipts(
- receipts, user_id
- )
- return receipts
+
+ return ReceiptEventSource.filter_out_private_receipts(receipts, user_id)
presence, receipts, (messages, token) = await make_deferred_yieldable(
gather_results(
@@ -470,7 +448,7 @@ class InitialSyncHandler:
run_in_background(
self.store.get_recent_events_for_room,
room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=now_token.room_key,
),
),
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index ee0773988e..4cf593cfdc 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -52,16 +52,20 @@ from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
+from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import (
MutableStateMap,
+ PersistedEventPosition,
Requester,
RoomAlias,
+ StateMap,
StreamToken,
UserID,
create_requester,
@@ -103,7 +107,7 @@ class MessageHandler:
async def get_room_data(
self,
- user_id: str,
+ requester: Requester,
room_id: str,
event_type: str,
state_key: str,
@@ -111,7 +115,7 @@ class MessageHandler:
"""Get data from a room.
Args:
- user_id
+ requester: The user making the request.
room_id
event_type
state_key
@@ -124,7 +128,7 @@ class MessageHandler:
membership,
membership_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
- room_id, user_id, allow_departed_users=True
+ room_id, requester, allow_departed_users=True
)
if membership == Membership.JOIN:
@@ -160,11 +164,10 @@ class MessageHandler:
async def get_state_events(
self,
- user_id: str,
+ requester: Requester,
room_id: str,
state_filter: Optional[StateFilter] = None,
at_token: Optional[StreamToken] = None,
- is_guest: bool = False,
) -> List[dict]:
"""Retrieve all state events for a given room. If the user is
joined to the room then return the current state. If the user has
@@ -173,14 +176,13 @@ class MessageHandler:
visible.
Args:
- user_id: The user requesting state events.
+ requester: The user requesting state events.
room_id: The room ID to get all state events from.
state_filter: The state filter used to fetch state from the database.
at_token: the stream token at which we are requesting the state.
If the user is not allowed to view the state as of that
stream token, we raise a 403 SynapseError. If None, returns the current
state based on the current_state_events table.
- is_guest: whether this user is a guest
Returns:
A list of dicts representing state events. [{}, {}, {}]
Raises:
@@ -190,6 +192,7 @@ class MessageHandler:
members of this room.
"""
state_filter = state_filter or StateFilter.all()
+ user_id = requester.user.to_string()
if at_token:
last_event_id = (
@@ -222,7 +225,7 @@ class MessageHandler:
membership,
membership_event_id,
) = await self.auth.check_user_in_room_or_world_readable(
- room_id, user_id, allow_departed_users=True
+ room_id, requester, allow_departed_users=True
)
if membership == Membership.JOIN:
@@ -316,12 +319,11 @@ class MessageHandler:
Returns:
A dict of user_id to profile info
"""
- user_id = requester.user.to_string()
if not requester.app_service:
# We check AS auth after fetching the room membership, as it
# requires us to pull out all joined members anyway.
membership, _ = await self.auth.check_user_in_room_or_world_readable(
- room_id, user_id, allow_departed_users=True
+ room_id, requester, allow_departed_users=True
)
if membership != Membership.JOIN:
raise SynapseError(
@@ -330,12 +332,19 @@ class MessageHandler:
msg="Getting joined members while not being a current member of the room is forbidden.",
)
- users_with_profile = await self.store.get_users_in_room_with_profiles(room_id)
+ users_with_profile = (
+ await self._state_storage_controller.get_users_in_room_with_profiles(
+ room_id
+ )
+ )
# If this is an AS, double check that they are allowed to see the members.
# This can either be because the AS user is in the room or because there
# is a user in the room that the AS is "interested in"
- if requester.app_service and user_id not in users_with_profile:
+ if (
+ requester.app_service
+ and requester.user.to_string() not in users_with_profile
+ ):
for uid in users_with_profile:
if requester.app_service.is_interested_in_user(uid):
break
@@ -486,6 +495,7 @@ class EventCreationHandler:
self.membership_types_to_include_profile_data_in.add(Membership.INVITE)
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
+ self.send_events = ReplicationSendEventsRestServlet.make_client(hs)
self.request_ratelimiter = hs.get_request_ratelimiter()
@@ -561,9 +571,17 @@ class EventCreationHandler:
outlier: bool = False,
historical: bool = False,
depth: Optional[int] = None,
+ state_map: Optional[StateMap[str]] = None,
+ for_batch: bool = False,
+ current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
"""
- Given a dict from a client, create a new event.
+ Given a dict from a client, create a new event. If for_batch is true, the event
+ is created using the given prev_event_ids, and its event context is built from
+ the state_map and current_state_group parameters, both of which must therefore
+ be provided. The resulting event and context are suitable for being batched up
+ and bulk persisted to the database with other similarly created events.
Creates a FrozenEvent object, filling out auth_events, prev_events,
etc.
@@ -606,16 +624,27 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
+
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
+
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ state_map: A state map of previously created events, used only when creating events
+ for batch persisting
+
+ for_batch: whether the event is being created for batch persisting to the db
+
+ current_state_group: the current state group, used only for creating events for
+ batch persisting
+
Raises:
ResourceLimitError if server is blocked to some resource being
exceeded
+
Returns:
Tuple of created event, Context
"""
@@ -687,6 +716,9 @@ class EventCreationHandler:
auth_event_ids=auth_event_ids,
state_event_ids=state_event_ids,
depth=depth,
+ state_map=state_map,
+ for_batch=for_batch,
+ current_state_group=current_state_group,
)
# In an ideal world we wouldn't need the second part of this condition. However,
@@ -701,10 +733,14 @@ class EventCreationHandler:
# federation as well as those created locally. As of room v3, aliases events
# can be created by users that are not in the room, therefore we have to
# tolerate them in event_auth.check().
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.Member, None)])
- )
- prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
+ if for_batch:
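+ # When creating an event for batch persisting, look up the sender's previous
+ # membership event in the supplied state_map rather than in the event context.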
+ assert state_map is not None
+ prev_event_id = state_map.get((EventTypes.Member, event.sender))
+ else:
+ prev_state_ids = await context.get_prev_state_ids(
+ StateFilter.from_types([(EventTypes.Member, None)])
+ )
+ prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
prev_event = (
await self.store.get_event(prev_event_id, allow_none=True)
if prev_event_id
@@ -746,18 +782,12 @@ class EventCreationHandler:
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
if membership == Membership.JOIN:
- return await self._is_server_notices_room(builder.room_id)
+ return await self.store.is_server_notice_room(builder.room_id)
elif membership == Membership.LEAVE:
# the user is always allowed to leave (but not kick people)
return builder.state_key == requester.user.to_string()
return False
- async def _is_server_notices_room(self, room_id: str) -> bool:
- if self.config.servernotices.server_notices_mxid is None:
- return False
- user_ids = await self.store.get_users_in_room(room_id)
- return self.config.servernotices.server_notices_mxid in user_ids
-
async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
"""Check if a user has accepted the privacy policy
@@ -847,6 +877,36 @@ class EventCreationHandler:
return prev_event
return None
+ async def get_event_from_transaction(
+ self,
+ requester: Requester,
+ txn_id: str,
+ room_id: str,
+ ) -> Optional[EventBase]:
+ """For the given transaction ID and room ID, check if there is a matching event.
+ If so, fetch it and return it.
+
+ Args:
+ requester: The requester making the request in the context of which we want
+ to fetch the event.
+ txn_id: The transaction ID.
+ room_id: The room ID.
+
+ Returns:
+ An event if one could be found, None otherwise.
+ """
+ if requester.access_token_id:
+ existing_event_id = await self.store.get_event_id_from_transaction_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
+
+ return None
+
async def create_and_send_nonmember_event(
self,
requester: Requester,
@@ -926,18 +986,17 @@ class EventCreationHandler:
# extremities to pile up, which in turn leads to state resolution
# taking longer.
async with self.limiter.queue(event_dict["room_id"]):
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- event_dict["room_id"],
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
+ if txn_id:
+ event = await self.get_event_from_transaction(
+ requester, txn_id, event_dict["room_id"]
)
- if existing_event_id:
- event = await self.store.get_event(existing_event_id)
+ if event:
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
- return event, event.internal_metadata.stream_ordering
+ return (
+ event,
+ event.internal_metadata.stream_ordering,
+ )
event, context = await self.create_event(
requester,
@@ -989,8 +1048,7 @@ class EventCreationHandler:
ev = await self.handle_new_client_event(
requester=requester,
- event=event,
- context=context,
+ events_and_context=[(event, context)],
ratelimit=ratelimit,
ignore_shadow_ban=ignore_shadow_ban,
)
@@ -1009,8 +1067,16 @@ class EventCreationHandler:
auth_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ state_map: Optional[StateMap[str]] = None,
+ for_batch: bool = False,
+ current_state_group: Optional[int] = None,
) -> Tuple[EventBase, EventContext]:
- """Create a new event for a local client
+ """Create a new event for a local client. If bool for_batch is true, will
+ create an event using the prev_event_ids, and will create an event context for
+ the event using the parameters state_map and current_state_group, thus these parameters
+ must be provided in this case if for_batch is True. The subsequently created event
+ and context are suitable for being batched up and bulk persisted to the database
+ with other similarly created events.
Args:
builder:
@@ -1043,6 +1109,14 @@ class EventCreationHandler:
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ state_map: A state map of previously created events, used only when creating events
+ for batch persisting
+
+ for_batch: whether the event is being created for batch persisting to the db
+
+ current_state_group: the current state group, used only for creating events for
+ batch persisting
+
Returns:
Tuple of created event, context
"""
@@ -1095,64 +1169,76 @@ class EventCreationHandler:
builder.type == EventTypes.Create or prev_event_ids
), "Attempting to create a non-m.room.create event with no prev_events"
- event = await builder.build(
- prev_event_ids=prev_event_ids,
- auth_event_ids=auth_event_ids,
- depth=depth,
- )
+ if for_batch:
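+ # For batched events, compute the auth events from the supplied state_map and
+ # build the event context from state_map and current_state_group rather than
+ # going through the usual state computation.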
+ assert prev_event_ids is not None
+ assert state_map is not None
+ assert current_state_group is not None
+ auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map)
+ event = await builder.build(
+ prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
+ )
+ context = await self.state.compute_event_context_for_batched(
+ event, state_map, current_state_group
+ )
+ else:
+ event = await builder.build(
+ prev_event_ids=prev_event_ids,
+ auth_event_ids=auth_event_ids,
+ depth=depth,
+ )
- # Pass on the outlier property from the builder to the event
- # after it is created
- if builder.internal_metadata.outlier:
- event.internal_metadata.outlier = True
- context = EventContext.for_outlier(self._storage_controllers)
- elif (
- event.type == EventTypes.MSC2716_INSERTION
- and state_event_ids
- and builder.internal_metadata.is_historical()
- ):
- # Add explicit state to the insertion event so it has state to derive
- # from even though it's floating with no `prev_events`. The rest of
- # the batch can derive from this state and state_group.
- #
- # TODO(faster_joins): figure out how this works, and make sure that the
- # old state is complete.
- # https://github.com/matrix-org/synapse/issues/13003
- metadata = await self.store.get_metadata_for_events(state_event_ids)
-
- state_map_for_event: MutableStateMap[str] = {}
- for state_id in state_event_ids:
- data = metadata.get(state_id)
- if data is None:
- # We're trying to persist a new historical batch of events
- # with the given state, e.g. via
- # `RoomBatchSendEventRestServlet`. The state can be inferred
- # by Synapse or set directly by the client.
- #
- # Either way, we should have persisted all the state before
- # getting here.
- raise Exception(
- f"State event {state_id} not found in DB,"
- " Synapse should have persisted it before using it."
- )
+ # Pass on the outlier property from the builder to the event
+ # after it is created
+ if builder.internal_metadata.outlier:
+ event.internal_metadata.outlier = True
+ context = EventContext.for_outlier(self._storage_controllers)
+ elif (
+ event.type == EventTypes.MSC2716_INSERTION
+ and state_event_ids
+ and builder.internal_metadata.is_historical()
+ ):
+ # Add explicit state to the insertion event so it has state to derive
+ # from even though it's floating with no `prev_events`. The rest of
+ # the batch can derive from this state and state_group.
+ #
+ # TODO(faster_joins): figure out how this works, and make sure that the
+ # old state is complete.
+ # https://github.com/matrix-org/synapse/issues/13003
+ metadata = await self.store.get_metadata_for_events(state_event_ids)
+
+ state_map_for_event: MutableStateMap[str] = {}
+ for state_id in state_event_ids:
+ data = metadata.get(state_id)
+ if data is None:
+ # We're trying to persist a new historical batch of events
+ # with the given state, e.g. via
+ # `RoomBatchSendEventRestServlet`. The state can be inferred
+ # by Synapse or set directly by the client.
+ #
+ # Either way, we should have persisted all the state before
+ # getting here.
+ raise Exception(
+ f"State event {state_id} not found in DB,"
+ " Synapse should have persisted it before using it."
+ )
- if data.state_key is None:
- raise Exception(
- f"Trying to set non-state event {state_id} as state"
- )
+ if data.state_key is None:
+ raise Exception(
+ f"Trying to set non-state event {state_id} as state"
+ )
- state_map_for_event[(data.event_type, data.state_key)] = state_id
+ state_map_for_event[(data.event_type, data.state_key)] = state_id
- context = await self.state.compute_event_context(
- event,
- state_ids_before_event=state_map_for_event,
- # TODO(faster_joins): check how MSC2716 works and whether we can have
- # partial state here
- # https://github.com/matrix-org/synapse/issues/13003
- partial_state=False,
- )
- else:
- context = await self.state.compute_event_context(event)
+ context = await self.state.compute_event_context(
+ event,
+ state_ids_before_event=state_map_for_event,
+ # TODO(faster_joins): check how MSC2716 works and whether we can have
+ # partial state here
+ # https://github.com/matrix-org/synapse/issues/13003
+ partial_state=False,
+ )
+ else:
+ context = await self.state.compute_event_context(event)
if requester:
context.app_service = requester.app_service
@@ -1238,13 +1324,13 @@ class EventCreationHandler:
async def handle_new_client_event(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
ignore_shadow_ban: bool = False,
) -> EventBase:
- """Processes a new event.
+ """Processes new events. Please note that if batch persisting events, an error in
+ handling any one of these events will result in all of the events being dropped.
This includes deduplicating, checking auth, persisting,
notifying users, sending to remote servers, etc.
@@ -1254,8 +1340,7 @@ class EventCreationHandler:
Args:
requester
- event
- context
+ events_and_context: A list of one or more (event, context) tuples to be persisted
ratelimit
extra_users: Any extra users to notify about event
@@ -1273,67 +1358,76 @@ class EventCreationHandler:
"""
extra_users = extra_users or []
- # we don't apply shadow-banning to membership events here. Invites are blocked
- # higher up the stack, and we allow shadow-banned users to send join and leave
- # events as normal.
- if (
- event.type != EventTypes.Member
- and not ignore_shadow_ban
- and requester.shadow_banned
- ):
- # We randomly sleep a bit just to annoy the requester.
- await self.clock.sleep(random.randint(1, 10))
- raise ShadowBanError()
+ for event, context in events_and_context:
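+ # Run the per-event checks (shadow-banning, state-event deduplication, auth
+ # and JSON round-tripping) on every event before anything is persisted.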
+ # we don't apply shadow-banning to membership events here. Invites are blocked
+ # higher up the stack, and we allow shadow-banned users to send join and leave
+ # events as normal.
+ if (
+ event.type != EventTypes.Member
+ and not ignore_shadow_ban
+ and requester.shadow_banned
+ ):
+ # We randomly sleep a bit just to annoy the requester.
+ await self.clock.sleep(random.randint(1, 10))
+ raise ShadowBanError()
- if event.is_state():
- prev_event = await self.deduplicate_state_event(event, context)
- if prev_event is not None:
- logger.info(
- "Not bothering to persist state event %s duplicated by %s",
- event.event_id,
- prev_event.event_id,
- )
- return prev_event
+ if event.is_state():
+ prev_event = await self.deduplicate_state_event(event, context)
+ if prev_event is not None:
+ logger.info(
+ "Not bothering to persist state event %s duplicated by %s",
+ event.event_id,
+ prev_event.event_id,
+ )
+ return prev_event
- if event.internal_metadata.is_out_of_band_membership():
- # the only sort of out-of-band-membership events we expect to see here are
- # invite rejections and rescinded knocks that we have generated ourselves.
- assert event.type == EventTypes.Member
- assert event.content["membership"] == Membership.LEAVE
- else:
- try:
- validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(
- event, context
- )
- except AuthError as err:
- logger.warning("Denying new event %r because %s", event, err)
- raise err
+ if event.internal_metadata.is_out_of_band_membership():
+ # the only sort of out-of-band-membership events we expect to see here are
+ # invite rejections and rescinded knocks that we have generated ourselves.
+ assert event.type == EventTypes.Member
+ assert event.content["membership"] == Membership.LEAVE
+ else:
+ try:
+ validate_event_for_room_version(event)
+ # If we are persisting a batch of events the event(s) needed to auth the
+ # current event may be part of the batch and will not be in the DB yet
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ batched_auth_events = {}
+ for event_id in event.auth_event_ids():
+ auth_event = event_id_to_event.get(event_id)
+ if auth_event:
+ batched_auth_events[event_id] = auth_event
+ await self._event_auth_handler.check_auth_rules_from_context(
+ event, batched_auth_events
+ )
+ except AuthError as err:
+ logger.warning("Denying new event %r because %s", event, err)
+ raise err
- # Ensure that we can round trip before trying to persist in db
- try:
- dump = json_encoder.encode(event.content)
- json_decoder.decode(dump)
- except Exception:
- logger.exception("Failed to encode content: %r", event.content)
- raise
+ # Ensure that we can round trip before trying to persist in db
+ try:
+ dump = json_encoder.encode(event.content)
+ json_decoder.decode(dump)
+ except Exception:
+ logger.exception("Failed to encode content: %r", event.content)
+ raise
# We now persist the event (and update the cache in parallel, since we
# don't want to block on it).
+ event, context = events_and_context[0]
try:
result, _ = await make_deferred_yieldable(
gather_results(
(
run_in_background(
- self._persist_event,
+ self._persist_events,
requester=requester,
- event=event,
- context=context,
+ events_and_context=events_and_context,
ratelimit=ratelimit,
extra_users=extra_users,
),
run_in_background(
- self.cache_joined_hosts_for_event, event, context
+ self.cache_joined_hosts_for_events, events_and_context
).addErrback(
log_failure, "cache_joined_hosts_for_event failed"
),
@@ -1352,44 +1446,39 @@ class EventCreationHandler:
return result
- async def _persist_event(
+ async def _persist_events(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
- """Actually persists the event. Should only be called by
+ """Actually persists new events. Should only be called by
`handle_new_client_event`, and see its docstring for documentation of
- the arguments.
+ the arguments. Please note that when batch persisting events, an error in
+ handling any one of them will result in all of the events being dropped.
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
"""
- # Skip push notification actions for historical messages
- # because we don't want to notify people about old history back in time.
- # The historical messages also do not have the proper `context.current_state_ids`
- # and `state_groups` because they have `prev_events` that aren't persisted yet
- # (historical messages persisted in reverse-chronological order).
- if not event.internal_metadata.is_historical():
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
- )
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ events_and_context
+ )
try:
# If we're a worker we need to hit out to the master.
- writer_instance = self._events_shard_config.get_instance(event.room_id)
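+ # Route the whole batch based on the first event's room; all of the events
+ # are sent to the same events writer.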
+ first_event, _ = events_and_context[0]
+ writer_instance = self._events_shard_config.get_instance(
+ first_event.room_id
+ )
if writer_instance != self._instance_name:
try:
- result = await self.send_event(
+ result = await self.send_events(
instance_name=writer_instance,
- event_id=event.event_id,
+ events_and_context=events_and_context,
store=self.store,
requester=requester,
- event=event,
- context=context,
ratelimit=ratelimit,
extra_users=extra_users,
)
@@ -1399,6 +1488,11 @@ class EventCreationHandler:
raise
stream_id = result["stream_id"]
event_id = result["event_id"]
+
+ # If we batch persisted events we return the last persisted event, otherwise
+ # we return the one event that was persisted
+ event, _ = events_and_context[-1]
+
if event_id != event.event_id:
# If we get a different event back then it means that its
# been de-duplicated, so we replace the given event with the
@@ -1411,72 +1505,80 @@ class EventCreationHandler:
event.internal_metadata.stream_ordering = stream_id
return event
- event = await self.persist_and_notify_client_event(
- requester, event, context, ratelimit=ratelimit, extra_users=extra_users
+ event = await self.persist_and_notify_client_events(
+ requester,
+ events_and_context,
+ ratelimit=ratelimit,
+ extra_users=extra_users,
)
return event
except Exception:
- # Ensure that we actually remove the entries in the push actions
- # staging area, if we calculated them.
- await self.store.remove_push_actions_from_staging(event.event_id)
+ for event, _ in events_and_context:
+ # Ensure that we actually remove the entries in the push actions
+ # staging area, if we calculated them.
+ await self.store.remove_push_actions_from_staging(event.event_id)
raise
- async def cache_joined_hosts_for_event(
- self, event: EventBase, context: EventContext
+ async def cache_joined_hosts_for_events(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
) -> None:
- """Precalculate the joined hosts at the event, when using Redis, so that
+ """Precalculate the joined hosts at each of the given events, when using Redis, so that
external federation senders don't have to recalculate it themselves.
"""
- if not self._external_cache.is_enabled():
- return
-
- # If external cache is enabled we should always have this.
- assert self._external_cache_joined_hosts_updates is not None
+ for event, _ in events_and_context:
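+ # Compute and cache the joined hosts for each event in the batch.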
+ if not self._external_cache.is_enabled():
+ return
- # We actually store two mappings, event ID -> prev state group,
- # state group -> joined hosts, which is much more space efficient
- # than event ID -> joined hosts.
- #
- # Note: We have to cache event ID -> prev state group, as we don't
- # store that in the DB.
- #
- # Note: We set the state group -> joined hosts cache if it hasn't been
- # set for a while, so that the expiry time is reset.
+ # If external cache is enabled we should always have this.
+ assert self._external_cache_joined_hosts_updates is not None
- state_entry = await self.state.resolve_state_groups_for_events(
- event.room_id, event_ids=event.prev_event_ids()
- )
+ # We actually store two mappings, event ID -> prev state group,
+ # state group -> joined hosts, which is much more space efficient
+ # than event ID -> joined hosts.
+ #
+ # Note: We have to cache event ID -> prev state group, as we don't
+ # store that in the DB.
+ #
+ # Note: We set the state group -> joined hosts cache if it hasn't been
+ # set for a while, so that the expiry time is reset.
- if state_entry.state_group:
- await self._external_cache.set(
- "event_to_prev_state_group",
- event.event_id,
- state_entry.state_group,
- expiry_ms=60 * 60 * 1000,
+ state_entry = await self.state.resolve_state_groups_for_events(
+ event.room_id, event_ids=event.prev_event_ids()
)
- if state_entry.state_group in self._external_cache_joined_hosts_updates:
- return
+ if state_entry.state_group:
+ await self._external_cache.set(
+ "event_to_prev_state_group",
+ event.event_id,
+ state_entry.state_group,
+ expiry_ms=60 * 60 * 1000,
+ )
- state = await state_entry.get_state(
- self._storage_controllers.state, StateFilter.all()
- )
- joined_hosts = await self.store.get_joined_hosts(
- event.room_id, state, state_entry
- )
+ if state_entry.state_group in self._external_cache_joined_hosts_updates:
+ return
- # Note that the expiry times must be larger than the expiry time in
- # _external_cache_joined_hosts_updates.
- await self._external_cache.set(
- "get_joined_hosts",
- str(state_entry.state_group),
- list(joined_hosts),
- expiry_ms=60 * 60 * 1000,
- )
+ state = await state_entry.get_state(
+ self._storage_controllers.state, StateFilter.all()
+ )
+ with opentracing.start_active_span("get_joined_hosts"):
+ joined_hosts = await self.store.get_joined_hosts(
+ event.room_id, state, state_entry
+ )
+
+ # Note that the expiry times must be larger than the expiry time in
+ # _external_cache_joined_hosts_updates.
+ await self._external_cache.set(
+ "get_joined_hosts",
+ str(state_entry.state_group),
+ list(joined_hosts),
+ expiry_ms=60 * 60 * 1000,
+ )
- self._external_cache_joined_hosts_updates[state_entry.state_group] = None
+ self._external_cache_joined_hosts_updates[
+ state_entry.state_group
+ ] = None
async def _validate_canonical_alias(
self,
@@ -1512,23 +1614,26 @@ class EventCreationHandler:
Codes.BAD_ALIAS,
)
- async def persist_and_notify_client_event(
+ async def persist_and_notify_client_events(
self,
requester: Requester,
- event: EventBase,
- context: EventContext,
+ events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
) -> EventBase:
- """Called when we have fully built the event, have already
- calculated the push actions for the event, and checked auth.
+ """Called when we have fully built the events, have already
+ calculated the push actions for the events, and checked auth.
This should only be run on the instance in charge of persisting events.
+ Please note that when batch persisting events, an error in
+ handling any one of them will result in all of the events being dropped.
+
Returns:
- The persisted event. This may be different than the given event if
- it was de-duplicated (e.g. because we had already persisted an
- event with the same transaction ID.)
+ The persisted event, if one event is passed in, or the last event in the
+ list in the case of batch persisting. If only one event was persisted, the
+ returned event may be different from the given event if it was de-duplicated
+ (e.g. because we had already persisted an event with the same transaction ID).
Raises:
PartialStateConflictError: if attempting to persist a partial state event in
@@ -1536,277 +1641,296 @@ class EventCreationHandler:
"""
extra_users = extra_users or []
- assert self._storage_controllers.persistence is not None
- assert self._events_shard_config.should_handle(
- self._instance_name, event.room_id
- )
+ for event, context in events_and_context:
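+ # Run the per-event checks and side effects (ratelimiting, alias validation,
+ # invite/knock handling, redaction checks) before persisting the whole batch.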
+ assert self._events_shard_config.should_handle(
+ self._instance_name, event.room_id
+ )
- if ratelimit:
- # We check if this is a room admin redacting an event so that we
- # can apply different ratelimiting. We do this by simply checking
- # it's not a self-redaction (to avoid having to look up whether the
- # user is actually admin or not).
- is_admin_redaction = False
- if event.type == EventTypes.Redaction:
- assert event.redacts is not None
+ if ratelimit:
+ # We check if this is a room admin redacting an event so that we
+ # can apply different ratelimiting. We do this by simply checking
+ # it's not a self-redaction (to avoid having to look up whether the
+ # user is actually admin or not).
+ is_admin_redaction = False
+ if event.type == EventTypes.Redaction:
+ assert event.redacts is not None
+
+ original_event = await self.store.get_event(
+ event.redacts,
+ redact_behaviour=EventRedactBehaviour.as_is,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=True,
+ )
- original_event = await self.store.get_event(
- event.redacts,
- redact_behaviour=EventRedactBehaviour.as_is,
- get_prev_content=False,
- allow_rejected=False,
- allow_none=True,
+ is_admin_redaction = bool(
+ original_event and event.sender != original_event.sender
+ )
+
+ await self.request_ratelimiter.ratelimit(
+ requester, is_admin_redaction=is_admin_redaction
)
- is_admin_redaction = bool(
- original_event and event.sender != original_event.sender
+ # run checks/actions on event based on type
+ if event.type == EventTypes.Member and event.membership == Membership.JOIN:
+ (
+ current_membership,
+ _,
+ ) = await self.store.get_local_current_membership_for_user_in_room(
+ event.state_key, event.room_id
)
+ if current_membership != Membership.JOIN:
+ self._notifier.notify_user_joined_room(
+ event.event_id, event.room_id
+ )
- await self.request_ratelimiter.ratelimit(
- requester, is_admin_redaction=is_admin_redaction
- )
+ await self._maybe_kick_guest_users(event, context)
- if event.type == EventTypes.Member and event.membership == Membership.JOIN:
- (
- current_membership,
- _,
- ) = await self.store.get_local_current_membership_for_user_in_room(
- event.state_key, event.room_id
- )
- if current_membership != Membership.JOIN:
- self._notifier.notify_user_joined_room(event.event_id, event.room_id)
+ if event.type == EventTypes.CanonicalAlias:
+ # Validate a newly added alias or newly added alt_aliases.
- await self._maybe_kick_guest_users(event, context)
+ original_alias = None
+ original_alt_aliases: object = []
- if event.type == EventTypes.CanonicalAlias:
- # Validate a newly added alias or newly added alt_aliases.
+ original_event_id = event.unsigned.get("replaces_state")
+ if original_event_id:
+ original_alias_event = await self.store.get_event(original_event_id)
- original_alias = None
- original_alt_aliases: object = []
+ if original_alias_event:
+ original_alias = original_alias_event.content.get("alias", None)
+ original_alt_aliases = original_alias_event.content.get(
+ "alt_aliases", []
+ )
- original_event_id = event.unsigned.get("replaces_state")
- if original_event_id:
- original_event = await self.store.get_event(original_event_id)
+ # Check the alias is currently valid (if it has changed).
+ room_alias_str = event.content.get("alias", None)
+ directory_handler = self.hs.get_directory_handler()
+ if room_alias_str and room_alias_str != original_alias:
+ await self._validate_canonical_alias(
+ directory_handler, room_alias_str, event.room_id
+ )
- if original_event:
- original_alias = original_event.content.get("alias", None)
- original_alt_aliases = original_event.content.get("alt_aliases", [])
-
- # Check the alias is currently valid (if it has changed).
- room_alias_str = event.content.get("alias", None)
- directory_handler = self.hs.get_directory_handler()
- if room_alias_str and room_alias_str != original_alias:
- await self._validate_canonical_alias(
- directory_handler, room_alias_str, event.room_id
- )
+ # Check that alt_aliases is the proper form.
+ alt_aliases = event.content.get("alt_aliases", [])
+ if not isinstance(alt_aliases, (list, tuple)):
+ raise SynapseError(
+ 400,
+ "The alt_aliases property must be a list.",
+ Codes.INVALID_PARAM,
+ )
- # Check that alt_aliases is the proper form.
- alt_aliases = event.content.get("alt_aliases", [])
- if not isinstance(alt_aliases, (list, tuple)):
- raise SynapseError(
- 400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM
- )
+ # If the old version of alt_aliases is of an unknown form,
+ # completely replace it.
+ if not isinstance(original_alt_aliases, (list, tuple)):
+ # TODO: check that the original_alt_aliases' entries are all strings
+ original_alt_aliases = []
+
+ # Check that each alias is currently valid.
+ new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
+ if new_alt_aliases:
+ for alias_str in new_alt_aliases:
+ await self._validate_canonical_alias(
+ directory_handler, alias_str, event.room_id
+ )
- # If the old version of alt_aliases is of an unknown form,
- # completely replace it.
- if not isinstance(original_alt_aliases, (list, tuple)):
- # TODO: check that the original_alt_aliases' entries are all strings
- original_alt_aliases = []
+ federation_handler = self.hs.get_federation_handler()
- # Check that each alias is currently valid.
- new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
- if new_alt_aliases:
- for alias_str in new_alt_aliases:
- await self._validate_canonical_alias(
- directory_handler, alias_str, event.room_id
+ if event.type == EventTypes.Member:
+ if event.content["membership"] == Membership.INVITE:
+ event.unsigned[
+ "invite_room_state"
+ ] = await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ membership_user_id=event.sender,
)
- federation_handler = self.hs.get_federation_handler()
+ invitee = UserID.from_string(event.state_key)
+ if not self.hs.is_mine(invitee):
+ # TODO: Can we add signature from remote server in a nicer
+ # way? If we have been invited by a remote server, we need
+ # to get them to sign the event.
- if event.type == EventTypes.Member:
- if event.content["membership"] == Membership.INVITE:
- event.unsigned[
- "invite_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
- membership_user_id=event.sender,
- )
+ returned_invite = await federation_handler.send_invite(
+ invitee.domain, event
+ )
+ event.unsigned.pop("room_state", None)
- invitee = UserID.from_string(event.state_key)
- if not self.hs.is_mine(invitee):
- # TODO: Can we add signature from remote server in a nicer
- # way? If we have been invited by a remote server, we need
- # to get them to sign the event.
+ # TODO: Make sure the signatures actually are correct.
+ event.signatures.update(returned_invite.signatures)
- returned_invite = await federation_handler.send_invite(
- invitee.domain, event
+ if event.content["membership"] == Membership.KNOCK:
+ event.unsigned[
+ "knock_room_state"
+ ] = await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
)
- event.unsigned.pop("room_state", None)
- # TODO: Make sure the signatures actually are correct.
- event.signatures.update(returned_invite.signatures)
+ if event.type == EventTypes.Redaction:
+ assert event.redacts is not None
- if event.content["membership"] == Membership.KNOCK:
- event.unsigned[
- "knock_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
+ original_event = await self.store.get_event(
+ event.redacts,
+ redact_behaviour=EventRedactBehaviour.as_is,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=True,
)
- if event.type == EventTypes.Redaction:
- assert event.redacts is not None
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
- original_event = await self.store.get_event(
- event.redacts,
- redact_behaviour=EventRedactBehaviour.as_is,
- get_prev_content=False,
- allow_rejected=False,
- allow_none=True,
- )
+ # we can make some additional checks now if we have the original event.
+ if original_event:
+ if original_event.type == EventTypes.Create:
+ raise AuthError(403, "Redacting create events is not permitted")
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- # we can make some additional checks now if we have the original event.
- if original_event:
- if original_event.type == EventTypes.Create:
- raise AuthError(403, "Redacting create events is not permitted")
-
- if original_event.room_id != event.room_id:
- raise SynapseError(400, "Cannot redact event from a different room")
-
- if original_event.type == EventTypes.ServerACL:
- raise AuthError(403, "Redacting server ACL events is not permitted")
-
- # Add a little safety stop-gap to prevent people from trying to
- # redact MSC2716 related events when they're in a room version
- # which does not support it yet. We allow people to use MSC2716
- # events in existing room versions but only from the room
- # creator since it does not require any changes to the auth
- # rules and in effect, the redaction algorithm . In the
- # supported room version, we add the `historical` power level to
- # auth the MSC2716 related events and adjust the redaction
- # algorthim to keep the `historical` field around (redacting an
- # event should only strip fields which don't affect the
- # structural protocol level).
- is_msc2716_event = (
- original_event.type == EventTypes.MSC2716_INSERTION
- or original_event.type == EventTypes.MSC2716_BATCH
- or original_event.type == EventTypes.MSC2716_MARKER
- )
- if not room_version_obj.msc2716_historical and is_msc2716_event:
- raise AuthError(
- 403,
- "Redacting MSC2716 events is not supported in this room version",
- )
+ if original_event.room_id != event.room_id:
+ raise SynapseError(
+ 400, "Cannot redact event from a different room"
+ )
- event_types = event_auth.auth_types_for_event(event.room_version, event)
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types(event_types)
- )
+ if original_event.type == EventTypes.ServerACL:
+ raise AuthError(
+ 403, "Redacting server ACL events is not permitted"
+ )
- auth_events_ids = self._event_auth_handler.compute_auth_events(
- event, prev_state_ids, for_verification=True
- )
- auth_events_map = await self.store.get_events(auth_events_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
+ # Add a little safety stop-gap to prevent people from trying to
+ # redact MSC2716 related events when they're in a room version
+ # which does not support it yet. We allow people to use MSC2716
+ # events in existing room versions but only from the room
+ # creator since it does not require any changes to the auth
+ # rules and, in effect, the redaction algorithm. In the
+ # supported room version, we add the `historical` power level to
+ # auth the MSC2716 related events and adjust the redaction
+ # algorithm to keep the `historical` field around (redacting an
+ # event should only strip fields which don't affect the
+ # structural protocol level).
+ is_msc2716_event = (
+ original_event.type == EventTypes.MSC2716_INSERTION
+ or original_event.type == EventTypes.MSC2716_BATCH
+ or original_event.type == EventTypes.MSC2716_MARKER
+ )
+ if not room_version_obj.msc2716_historical and is_msc2716_event:
+ raise AuthError(
+ 403,
+ "Redacting MSC2716 events is not supported in this room version",
+ )
- if event_auth.check_redaction(
- room_version_obj, event, auth_events=auth_events
- ):
- # this user doesn't have 'redact' rights, so we need to do some more
- # checks on the original event. Let's start by checking the original
- # event exists.
- if not original_event:
- raise NotFoundError("Could not find event %s" % (event.redacts,))
-
- if event.user_id != original_event.user_id:
- raise AuthError(403, "You don't have permission to redact events")
-
- # all the checks are done.
- event.internal_metadata.recheck_redaction = False
-
- if event.type == EventTypes.Create:
- prev_state_ids = await context.get_prev_state_ids()
- if prev_state_ids:
- raise AuthError(403, "Changing the room create event is forbidden")
-
- if event.type == EventTypes.MSC2716_INSERTION:
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- create_event = await self.store.get_create_event_for_room(event.room_id)
- room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
-
- # Only check an insertion event if the room version
- # supports it or the event is from the room creator.
- if room_version_obj.msc2716_historical or (
- self.config.experimental.msc2716_enabled
- and event.sender == room_creator
- ):
- next_batch_id = event.content.get(
- EventContentFields.MSC2716_NEXT_BATCH_ID
+ event_types = event_auth.auth_types_for_event(event.room_version, event)
+ prev_state_ids = await context.get_prev_state_ids(
+ StateFilter.from_types(event_types)
)
- conflicting_insertion_event_id = None
- if next_batch_id:
- conflicting_insertion_event_id = (
- await self.store.get_insertion_event_id_by_batch_id(
- event.room_id, next_batch_id
+
+ auth_events_ids = self._event_auth_handler.compute_auth_events(
+ event, prev_state_ids, for_verification=True
+ )
+ auth_events_map = await self.store.get_events(auth_events_ids)
+ auth_events = {
+ (e.type, e.state_key): e for e in auth_events_map.values()
+ }
+
+ if event_auth.check_redaction(
+ room_version_obj, event, auth_events=auth_events
+ ):
+ # this user doesn't have 'redact' rights, so we need to do some more
+ # checks on the original event. Let's start by checking the original
+ # event exists.
+ if not original_event:
+ raise NotFoundError(
+ "Could not find event %s" % (event.redacts,)
)
+
+ if event.user_id != original_event.user_id:
+ raise AuthError(
+ 403, "You don't have permission to redact events"
+ )
+
+ # all the checks are done.
+ event.internal_metadata.recheck_redaction = False
+
+ if event.type == EventTypes.Create:
+ prev_state_ids = await context.get_prev_state_ids()
+ if prev_state_ids:
+ raise AuthError(403, "Changing the room create event is forbidden")
+
+ if event.type == EventTypes.MSC2716_INSERTION:
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
+ create_event = await self.store.get_create_event_for_room(event.room_id)
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ # Only check an insertion event if the room version
+ # supports it or the event is from the room creator.
+ if room_version_obj.msc2716_historical or (
+ self.config.experimental.msc2716_enabled
+ and event.sender == room_creator
+ ):
+ next_batch_id = event.content.get(
+ EventContentFields.MSC2716_NEXT_BATCH_ID
)
- if conflicting_insertion_event_id is not None:
- # The current insertion event that we're processing is invalid
- # because an insertion event already exists in the room with the
- # same next_batch_id. We can't allow multiple because the batch
- # pointing will get weird, e.g. we can't determine which insertion
- # event the batch event is pointing to.
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Another insertion event already exists with the same next_batch_id",
- errcode=Codes.INVALID_PARAM,
- )
+ conflicting_insertion_event_id = None
+ if next_batch_id:
+ conflicting_insertion_event_id = (
+ await self.store.get_insertion_event_id_by_batch_id(
+ event.room_id, next_batch_id
+ )
+ )
+ if conflicting_insertion_event_id is not None:
+ # The current insertion event that we're processing is invalid
+ # because an insertion event already exists in the room with the
+ # same next_batch_id. We can't allow multiple because the batch
+ # pointing will get weird, e.g. we can't determine which insertion
+ # event the batch event is pointing to.
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Another insertion event already exists with the same next_batch_id",
+ errcode=Codes.INVALID_PARAM,
+ )
- # Mark any `m.historical` messages as backfilled so they don't appear
- # in `/sync` and have the proper decrementing `stream_ordering` as we import
- backfilled = False
- if event.internal_metadata.is_historical():
- backfilled = True
+ # Mark any `m.historical` messages as backfilled so they don't appear
+ # in `/sync` and have the proper decrementing `stream_ordering` as we import
+ backfilled = False
+ if event.internal_metadata.is_historical():
+ backfilled = True
- # Note that this returns the event that was persisted, which may not be
- # the same as we passed in if it was deduplicated due transaction IDs.
+ assert self._storage_controllers.persistence is not None
(
- event,
- event_pos,
+ persisted_events,
max_stream_token,
- ) = await self._storage_controllers.persistence.persist_event(
- event, context=context, backfilled=backfilled
+ ) = await self._storage_controllers.persistence.persist_events(
+ events_and_context, backfilled=backfilled
)
- if self._ephemeral_events_enabled:
- # If there's an expiry timestamp on the event, schedule its expiry.
- self._message_handler.maybe_schedule_expiry(event)
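+ # Pair each persisted event with its stream position so the notifier can wake
+ # up listeners for all of them below.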
+ events_and_pos = []
+ for event in persisted_events:
+ if self._ephemeral_events_enabled:
+ # If there's an expiry timestamp on the event, schedule its expiry.
+ self._message_handler.maybe_schedule_expiry(event)
+
+ stream_ordering = event.internal_metadata.stream_ordering
+ assert stream_ordering is not None
+ pos = PersistedEventPosition(self._instance_name, stream_ordering)
+ events_and_pos.append((event, pos))
+
+ if event.type == EventTypes.Message:
+ # We don't want to block sending messages on any presence code. This
+ # matters as sometimes presence code can take a while.
+ run_in_background(self._bump_active_time, requester.user)
async def _notify() -> None:
try:
- await self.notifier.on_new_room_event(
- event, event_pos, max_stream_token, extra_users=extra_users
+ await self.notifier.on_new_room_events(
+ events_and_pos, max_stream_token, extra_users=extra_users
)
except Exception:
- logger.exception(
- "Error notifying about new room event %s",
- event.event_id,
- )
+ logger.exception("Error notifying about new room events")
run_in_background(_notify)
- if event.type == EventTypes.Message:
- # We don't want to block sending messages on any presence code. This
- # matters as sometimes presence code can take a while.
- run_in_background(self._bump_active_time, requester.user)
-
- return event
+ return persisted_events[-1]
async def _maybe_kick_guest_users(
self, event: EventBase, context: EventContext
@@ -1895,8 +2019,7 @@ class EventCreationHandler:
# shadow-banned user.
await self.handle_new_client_event(
requester,
- event,
- context,
+ events_and_context=[(event, context)],
ratelimit=False,
ignore_shadow_ban=True,
)
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a8226900..03de6a4ba6 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -12,14 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import binascii
import inspect
+import json
import logging
-from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generic,
+ List,
+ Optional,
+ Type,
+ TypeVar,
+ Union,
+)
from urllib.parse import urlencode, urlparse
import attr
+import unpaddedbase64
from authlib.common.security import generate_token
-from authlib.jose import JsonWebToken, jwt
+from authlib.jose import JsonWebToken, JWTClaims
+from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, UserInfo
@@ -35,9 +49,12 @@ from typing_extensions import TypedDict
from twisted.web.client import readBody
from twisted.web.http_headers import Headers
+from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
+from synapse.http.server import finish_request
+from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
@@ -88,6 +105,8 @@ class Token(TypedDict):
#: there is no real point of doing this in our case.
JWK = Dict[str, str]
+C = TypeVar("C")
+
#: A JWK Set, as per RFC7517 sec 5.
class JWKS(TypedDict):
@@ -247,6 +266,80 @@ class OidcHandler:
await oidc_provider.handle_oidc_callback(request, session_data, code)
+ async def handle_backchannel_logout(self, request: SynapseRequest) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ This extracts the logout_token from the request and tries to figure out
+ which OpenID Provider it is comming from. This works by matching the iss claim
+ with the issuer and the aud claim with the client_id.
+
+ Since at this point we don't know who signed the JWT, we can't just
+ decode it using authlib since it will always verifies the signature. We
+ have to decode it manually without validating the signature. The actual JWT
+ verification is done in the `OidcProvider.handler_backchannel_logout` method,
+ once we figured out which provider sent the request.
+
+ Args:
+ request: the incoming request from the browser.
+ """
+ logout_token = parse_string(request, "logout_token")
+ if logout_token is None:
+ raise SynapseError(400, "Missing logout_token in request")
+
+ # A JWT looks like this:
+ # header.payload.signature
+ # where all parts are encoded with urlsafe base64.
+ # The aud and iss claims we care about are in the payload part, which
+ # is a JSON object.
+ try:
+ # By destructuring the list after splitting, we ensure that we have
+ # exactly 3 segments
+ _, payload, _ = logout_token.split(".")
+ except ValueError:
+ raise SynapseError(400, "Invalid logout_token in request")
+
+ try:
+ payload_bytes = unpaddedbase64.decode_base64(payload)
+ claims = json_decoder.decode(payload_bytes.decode("utf-8"))
+ except (json.JSONDecodeError, binascii.Error, UnicodeError):
+ raise SynapseError(400, "Invalid logout_token payload in request")
+
+ try:
+ # Let's extract the iss and aud claims
+ iss = claims["iss"]
+ aud = claims["aud"]
+ # The aud claim can be either a string or a list of strings. Here we
+ # normalize it as a list of strings.
+ if isinstance(aud, str):
+ aud = [aud]
+
+ # Check that we have the right types for the aud and the iss claims
+ if not isinstance(iss, str) or not isinstance(aud, list):
+ raise TypeError()
+ for a in aud:
+ if not isinstance(a, str):
+ raise TypeError()
+
+ # At this point we have properly checked both claim types
+ issuer: str = iss
+ audience: List[str] = aud
+ except (TypeError, KeyError):
+ raise SynapseError(400, "Invalid issuer/audience in logout_token")
+
+ # Now that we know the audience and the issuer, we can figure out from
+ # what provider it is coming from
+ oidc_provider: Optional[OidcProvider] = None
+ for provider in self._providers.values():
+ if provider.issuer == issuer and provider.client_id in audience:
+ oidc_provider = provider
+ break
+
+ if oidc_provider is None:
+ raise SynapseError(400, "Could not find the OP that issued this event")
+
+ # Ask the provider to handle the logout request.
+ await oidc_provider.handle_backchannel_logout(request, logout_token)
+
class OidcError(Exception):
"""Used to catch errors when calling the token_endpoint"""
@@ -275,6 +368,7 @@ class OidcProvider:
provider: OidcProviderConfig,
):
self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
self._macaroon_generaton = macaroon_generator
@@ -341,6 +435,7 @@ class OidcProvider:
self.idp_brand = provider.idp_brand
self._sso_handler = hs.get_sso_handler()
+ self._device_handler = hs.get_device_handler()
self._sso_handler.register_identity_provider(self)
@@ -399,6 +494,41 @@ class OidcProvider:
# If we're not using userinfo, we need a valid jwks to validate the ID token
m.validate_jwks_uri()
+ if self._config.backchannel_logout_enabled:
+ if not m.get("backchannel_logout_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled for issuer %r"
+ "but it does not advertise support for it",
+ self.issuer,
+ )
+
+ elif not m.get("backchannel_logout_session_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled and supported "
+ "by issuer %r but it might not send a session ID with "
+ "logout tokens, which is required for the logouts to work",
+ self.issuer,
+ )
+
+ if not self._config.backchannel_logout_ignore_sub:
+ # If OIDC backchannel logouts are enabled, the user mapping provider
+ # should use the `sub` claim. We verify that by mapping a dummy user and
+ # checking that we get back the sub claim.
+ user = UserInfo({"sub": "thisisasubject"})
+ try:
+ subject = self._user_mapping_provider.get_remote_user_id(user)
+ if subject != user["sub"]:
+ raise ValueError("Unexpected subject")
+ except Exception:
+ logger.warning(
+ f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
+ "but it looks like the configured `user_mapping_provider` "
+ "does not use the `sub` claim as subject. If it is the case, "
+ "and you want Synapse to ignore the `sub` claim in OIDC "
+ "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
+ "to `true` in the issuer config."
+ )
+
@property
def _uses_userinfo(self) -> bool:
"""Returns True if the ``userinfo_endpoint`` should be used.
@@ -414,6 +544,16 @@ class OidcProvider:
or self._user_profile_method == "userinfo_endpoint"
)
+ @property
+ def issuer(self) -> str:
+ """The issuer identifying this provider."""
+ return self._config.issuer
+
+ @property
+ def client_id(self) -> str:
+ """The client_id used when interacting with this provider."""
+ return self._config.client_id
+
async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata:
"""Return the provider metadata.
@@ -647,7 +787,7 @@ class OidcProvider:
Must include an ``access_token`` field.
Returns:
- UserInfo: an object representing the user.
+ an object representing the user.
"""
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
@@ -661,61 +801,99 @@ class OidcProvider:
return UserInfo(resp)
- async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
- """Return an instance of UserInfo from token's ``id_token``.
+ async def _verify_jwt(
+ self,
+ alg_values: List[str],
+ token: str,
+ claims_cls: Type[C],
+ claims_options: Optional[dict] = None,
+ claims_params: Optional[dict] = None,
+ ) -> C:
+ """Decode and validate a JWT, re-fetching the JWKS as needed.
Args:
- token: the token given by the ``token_endpoint``.
- Must include an ``id_token`` field.
- nonce: the nonce value originally sent in the initial authorization
- request. This value should match the one inside the token.
+ alg_values: list of `alg` values allowed when verifying the JWT.
+ token: the JWT.
+ claims_cls: the JWTClaims class to use to validate the claims.
+ claims_options: dict of options passed to the `claims_cls` constructor.
+ claims_params: dict of params passed to the `claims_cls` constructor.
Returns:
- The decoded claims in the ID token.
+ The decoded claims in the JWT.
"""
- metadata = await self.load_metadata()
- claims_params = {
- "nonce": nonce,
- "client_id": self._client_auth.client_id,
- }
- if "access_token" in token:
- # If we got an `access_token`, there should be an `at_hash` claim
- # in the `id_token` that we can check against.
- claims_params["access_token"] = token["access_token"]
-
- alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
jwt = JsonWebToken(alg_values)
- claim_options = {"iss": {"values": [metadata["issuer"]]}}
-
- id_token = token["id_token"]
- logger.debug("Attempting to decode JWT id_token %r", id_token)
+ logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token)
# Try to decode the keys in cache first, then retry by forcing the keys
# to be reloaded
jwk_set = await self.load_jwks()
try:
claims = jwt.decode(
- id_token,
+ token,
key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
claims_params=claims_params,
)
except ValueError:
logger.info("Reloading JWKS after decode error")
jwk_set = await self.load_jwks(force=True) # try reloading the jwks
claims = jwt.decode(
- id_token,
+ token,
key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
claims_params=claims_params,
)
- logger.debug("Decoded id_token JWT %r; validating", claims)
+ logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims)
- claims.validate(leeway=120) # allows 2 min of clock skew
+ claims.validate(
+ now=self._clock.time(), leeway=120
+ ) # allows 2 min of clock skew
+ return claims
+
+ async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
+ """Return an instance of UserInfo from token's ``id_token``.
+
+ Args:
+ token: the token given by the ``token_endpoint``.
+ Must include an ``id_token`` field.
+ nonce: the nonce value originally sent in the initial authorization
+ request. This value should match the one inside the token.
+
+ Returns:
+ The decoded claims in the ID token.
+ """
+ id_token = token.get("id_token")
+
+        # This has theoretically been checked by the caller already, so even though
+        # assertions are not enabled in production, this is mainly here to appease mypy.
+ assert id_token is not None
+
+ metadata = await self.load_metadata()
+
+ claims_params = {
+ "nonce": nonce,
+ "client_id": self._client_auth.client_id,
+ }
+ if "access_token" in token:
+ # If we got an `access_token`, there should be an `at_hash` claim
+ # in the `id_token` that we can check against.
+ claims_params["access_token"] = token["access_token"]
+
+ claims_options = {"iss": {"values": [metadata["issuer"]]}}
+
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=id_token,
+ claims_cls=CodeIDToken,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
return claims
@@ -1036,6 +1214,146 @@ class OidcProvider:
# to be strings.
return str(remote_user_id)
+ async def handle_backchannel_logout(
+ self, request: SynapseRequest, logout_token: str
+ ) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ The OIDC Provider posts a logout token to this endpoint when a user
+ session ends. That token is a JWT signed with the same keys as
+ ID tokens. The OpenID Connect Back-Channel Logout draft explains how to
+ validate the JWT and figure out what session to end.
+
+ Args:
+ request: The request to respond to
+ logout_token: The logout token (a JWT) extracted from the request body
+ """
+ # Back-Channel Logout can be disabled in the config, hence this check.
+        # This is not that important for now since Synapse is registered
+        # manually with the OP, so not specifying the backchannel-logout URI is
+        # as effective as disabling it here. It might make more sense if we
+ # support dynamic registration in Synapse at some point.
+ if not self._config.backchannel_logout_enabled:
+ logger.warning(
+ f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
+ )
+
+ # TODO: this responds with a 400 status code, which is what the OIDC
+ # Back-Channel Logout spec expects, but spec also suggests answering with
+ # a JSON object, with the `error` and `error_description` fields set, which
+ # we are not doing here.
+ # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse
+ raise SynapseError(
+ 400, "OpenID Connect Back-Channel Logout is disabled for this provider"
+ )
+
+ metadata = await self.load_metadata()
+
+ # As per OIDC Back-Channel Logout 1.0 sec. 2.4:
+ # A Logout Token MUST be signed and MAY also be encrypted. The same
+ # keys are used to sign and encrypt Logout Tokens as are used for ID
+ # Tokens. If the Logout Token is encrypted, it SHOULD replicate the
+ # iss (issuer) claim in the JWT Header Parameters, as specified in
+ # Section 5.3 of [JWT].
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ # As per sec. 2.6:
+ # 3. Validate the iss, aud, and iat Claims in the same way they are
+ # validated in ID Tokens.
+ # Which means the audience should contain Synapse's client_id and the
+ # issuer should be the IdP issuer
+ claims_options = {
+ "iss": {"values": [metadata["issuer"]]},
+ "aud": {"values": [self.client_id]},
+ }
+
+ try:
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=logout_token,
+ claims_cls=LogoutToken,
+ claims_options=claims_options,
+ )
+ except JoseError:
+ logger.exception("Invalid logout_token")
+ raise SynapseError(400, "Invalid logout_token")
+
+ # As per sec. 2.6:
+ # 4. Verify that the Logout Token contains a sub Claim, a sid Claim,
+ # or both.
+ # 5. Verify that the Logout Token contains an events Claim whose
+        #      value is a JSON object containing the member name
+ # http://schemas.openid.net/event/backchannel-logout.
+ # 6. Verify that the Logout Token does not contain a nonce Claim.
+ # This is all verified by the LogoutToken claims class, so at this
+ # point the `sid` claim exists and is a string.
+ sid: str = claims.get("sid")
+
+        # If the `sub` claim was included in the logout token, we check that it
+        # matches the right user. We can have cases where the `sub` claim is not
+        # the ID saved in the database, so we let admins disable this check in config.
+ sub: Optional[str] = claims.get("sub")
+ expected_user_id: Optional[str] = None
+ if sub is not None and not self._config.backchannel_logout_ignore_sub:
+ expected_user_id = await self._store.get_user_by_external_id(
+ self.idp_id, sub
+ )
+
+ # Invalidate any running user-mapping sessions, in-flight login tokens and
+ # active devices
+ await self._sso_handler.revoke_sessions_for_provider_session_id(
+ auth_provider_id=self.idp_id,
+ auth_provider_session_id=sid,
+ expected_user_id=expected_user_id,
+ )
+
+ request.setResponseCode(200)
+ request.setHeader(b"Cache-Control", b"no-cache, no-store")
+ request.setHeader(b"Pragma", b"no-cache")
+ finish_request(request)
+
+
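
As a rough illustration of the other side of this exchange, the OP delivers the logout token as a form-encoded POST to the endpoint named in the docstring above. A minimal sketch using `requests` follows; the hostname and token value are placeholders.

```python
# Sketch of how an OpenID Provider might deliver a logout token (placeholder values).
import requests

resp = requests.post(
    # Endpoint handled by handle_backchannel_logout above; the hostname is illustrative.
    "https://synapse.example.com/_synapse/client/oidc/backchannel_logout",
    data={"logout_token": "<logout token JWT signed by the OP>"},
)
# 200: matching sessions were revoked; 400: the token was rejected or the
# feature is disabled for this provider.
print(resp.status_code)
```
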
+class LogoutToken(JWTClaims):
+ """
+    Holds and verifies claims of a logout token, as per
+ https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
+ """
+
+ REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"]
+
+ def validate(self, now: Optional[int] = None, leeway: int = 0) -> None:
+ """Validate everything in claims payload."""
+ super().validate(now, leeway)
+ self.validate_sid()
+ self.validate_events()
+ self.validate_nonce()
+
+ def validate_sid(self) -> None:
+ """Ensure the sid claim is present"""
+ sid = self.get("sid")
+ if not sid:
+ raise MissingClaimError("sid")
+
+ if not isinstance(sid, str):
+ raise InvalidClaimError("sid")
+
+ def validate_nonce(self) -> None:
+ """Ensure the nonce claim is absent"""
+ if "nonce" in self:
+ raise InvalidClaimError("nonce")
+
+ def validate_events(self) -> None:
+ """Ensure the events claim is present and with the right value"""
+ events = self.get("events")
+ if not events:
+ raise MissingClaimError("events")
+
+ if not isinstance(events, dict):
+ raise InvalidClaimError("events")
+
+ if "http://schemas.openid.net/event/backchannel-logout" not in events:
+ raise InvalidClaimError("events")
+
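
For reference, a claims payload that satisfies the validators above would look roughly like the following; every value is a placeholder.

```python
# Example logout token claims accepted by LogoutToken.validate() (placeholder values).
logout_token_claims = {
    "iss": "https://issuer.example.com",  # must match the provider's metadata
    "aud": "synapse-client-id",           # must contain Synapse's client_id
    "iat": 1667304000,
    "jti": "unique-token-id",
    "sid": "provider-session-id",         # required by validate_sid()
    "events": {
        # required member name, per the Back-Channel Logout spec
        "http://schemas.openid.net/event/backchannel-logout": {}
    },
    # adding a "nonce" claim here would make validate_nonce() reject the token
}
```
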
# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600
@@ -1105,6 +1423,7 @@ class JwtClientSecret:
logger.info(
"Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
)
+ jwt = JsonWebToken(header["alg"])
self._cached_secret = jwt.encode(header, payload, self._key.key)
self._cached_secret_replacement_time = (
expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS
@@ -1116,12 +1435,10 @@ class UserAttributeDict(TypedDict):
localpart: Optional[str]
confirm_localpart: bool
display_name: Optional[str]
+ picture: Optional[str] # may be omitted by older `OidcMappingProviders`
emails: List[str]
-C = TypeVar("C")
-
-
class OidcMappingProvider(Generic[C]):
"""A mapping provider maps a UserInfo object to user attributes.
@@ -1204,6 +1521,7 @@ env.filters.update(
@attr.s(slots=True, frozen=True, auto_attribs=True)
class JinjaOidcMappingConfig:
subject_claim: str
+ picture_claim: str
localpart_template: Optional[Template]
display_name_template: Optional[Template]
email_template: Optional[Template]
@@ -1223,6 +1541,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@staticmethod
def parse_config(config: dict) -> JinjaOidcMappingConfig:
subject_claim = config.get("subject_claim", "sub")
+ picture_claim = config.get("picture_claim", "picture")
def parse_template_config(option_name: str) -> Optional[Template]:
if option_name not in config:
@@ -1256,6 +1575,7 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
return JinjaOidcMappingConfig(
subject_claim=subject_claim,
+ picture_claim=picture_claim,
localpart_template=localpart_template,
display_name_template=display_name_template,
email_template=email_template,
@@ -1295,10 +1615,13 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
if email:
emails.append(email)
+ picture = userinfo.get("picture")
+
return UserAttributeDict(
localpart=localpart,
display_name=display_name,
emails=emails,
+ picture=picture,
confirm_localpart=self._config.confirm_localpart,
)
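
As a small usage sketch of the new option: `parse_config` falls back to `picture` when `picture_claim` is omitted. The claim name `avatar_url` below is just an example value, and the snippet assumes the `JinjaOidcMappingProvider` class from this file.

```python
# Hypothetical mapping-provider config exercising the new `picture_claim` option.
from synapse.handlers.oidc import JinjaOidcMappingProvider

config = JinjaOidcMappingProvider.parse_config(
    {
        "subject_claim": "sub",
        "picture_claim": "avatar_url",  # defaults to "picture" when omitted
    }
)
assert config.picture_claim == "avatar_url"
```
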
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 6262a35822..c572508a02 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -24,7 +24,9 @@ from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
+from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType
@@ -158,11 +160,9 @@ class PaginationHandler:
self._retention_allowed_lifetime_max = (
hs.config.retention.retention_allowed_lifetime_max
)
+ self._is_master = hs.config.worker.worker_app is None
- if (
- hs.config.worker.run_background_tasks
- and hs.config.retention.retention_enabled
- ):
+ if hs.config.retention.retention_enabled and self._is_master:
# Run the purge jobs described in the configuration file.
for job in hs.config.retention.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
@@ -416,6 +416,7 @@ class PaginationHandler:
await self._storage_controllers.purge_events.purge_room(room_id)
+ @trace
async def get_messages(
self,
requester: Requester,
@@ -423,6 +424,7 @@ class PaginationHandler:
pagin_config: PaginationConfig,
as_client_event: bool = True,
event_filter: Optional[Filter] = None,
+ use_admin_priviledge: bool = False,
) -> JsonDict:
"""Get messages in a room.
@@ -432,14 +434,26 @@ class PaginationHandler:
pagin_config: The pagination config rules to apply, if any.
as_client_event: True to get events in client-server format.
event_filter: Filter to apply to results or None
+ use_admin_priviledge: if `True`, return all events, regardless
+ of whether `user` has access to them. To be used **ONLY**
+ from the admin API.
Returns:
Pagination API results
"""
+ if use_admin_priviledge:
+ await assert_user_is_admin(self.auth, requester)
+
user_id = requester.user.to_string()
if pagin_config.from_token:
from_token = pagin_config.from_token
+ elif pagin_config.direction == "f":
+ from_token = (
+ await self.hs.get_event_sources().get_start_token_for_pagination(
+ room_id
+ )
+ )
else:
from_token = (
await self.hs.get_event_sources().get_current_token_for_pagination(
@@ -450,20 +464,17 @@ class PaginationHandler:
         # `/messages` should still work with live tokens when manually provided.
assert from_token.room_key.topological is not None
- if pagin_config.limit is None:
- # This shouldn't happen as we've set a default limit before this
- # gets called.
- raise Exception("limit not set")
-
room_token = from_token.room_key
async with self.pagination_lock.read(room_id):
- (
- membership,
- member_event_id,
- ) = await self.auth.check_user_in_room_or_world_readable(
- room_id, user_id, allow_departed_users=True
- )
+ (membership, member_event_id) = (None, None)
+ if not use_admin_priviledge:
+ (
+ membership,
+ member_event_id,
+ ) = await self.auth.check_user_in_room_or_world_readable(
+ room_id, requester, allow_departed_users=True
+ )
if pagin_config.direction == "b":
# if we're going backwards, we might need to backfill. This
@@ -475,7 +486,7 @@ class PaginationHandler:
room_id, room_token.stream
)
- if membership == Membership.LEAVE:
+ if not use_admin_priviledge and membership == Membership.LEAVE:
# If they have left the room then clamp the token to be before
# they left the room, to save the effort of loading from the
# database.
@@ -528,12 +539,13 @@ class PaginationHandler:
if event_filter:
events = await event_filter.filter(events)
- events = await filter_events_for_client(
- self._storage_controllers,
- user_id,
- events,
- is_peeking=(member_event_id is None),
- )
+ if not use_admin_priviledge:
+ events = await filter_events_for_client(
+ self._storage_controllers,
+ user_id,
+ events,
+ is_peeking=(member_event_id is None),
+ )
# if after the filter applied there are no more events
# return immediately - but there might be more in next_token batch
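
A hedged sketch of how an admin-only caller could use the new flag; the wrapper function below is illustrative and not the actual admin servlet code.

```python
# Illustrative admin-API style call: visibility filtering is skipped for admins only.
async def admin_room_messages(pagination_handler, requester, room_id, pagin_config):
    # get_messages() itself calls assert_user_is_admin() when this flag is set.
    return await pagination_handler.get_messages(
        requester=requester,
        room_id=room_id,
        pagin_config=pagin_config,
        as_client_event=True,
        use_admin_priviledge=True,  # spelling matches the parameter introduced above
    )
```
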
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 741504ba9f..cf08737d11 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -201,7 +201,7 @@ class BasePresenceHandler(abc.ABC):
"""Get the current presence state for multiple users.
Returns:
- dict: `user_id` -> `UserPresenceState`
+ A mapping of `user_id` -> `UserPresenceState`
"""
states = {}
missing = []
@@ -256,7 +256,7 @@ class BasePresenceHandler(abc.ABC):
with the app.
"""
- async def update_external_syncs_row(
+ async def update_external_syncs_row( # noqa: B027 (no-op by design)
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
@@ -272,7 +272,9 @@ class BasePresenceHandler(abc.ABC):
sync_time_msec: Time in ms when the user was last syncing
"""
- async def update_external_syncs_clear(self, process_id: str) -> None:
+ async def update_external_syncs_clear( # noqa: B027 (no-op by design)
+ self, process_id: str
+ ) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
@@ -476,7 +478,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
return _NullContextManager()
prev_state = await self.current_state_for_user(user_id)
- if prev_state != PresenceState.BUSY:
+ if prev_state.state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not
@@ -1596,7 +1598,9 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
self,
user: UserID,
from_key: Optional[int],
- limit: Optional[int] = None,
+ # Having a default limit doesn't match the EventSource API, but some
+ # callers do not provide it. It is unused in this class.
+ limit: int = 0,
room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
@@ -2051,8 +2055,7 @@ async def get_interested_remotes(
)
for room_id, states in room_ids_to_states.items():
- user_ids = await store.get_users_in_room(room_id)
- hosts = {get_domain_from_id(user_id) for user_id in user_ids}
+ hosts = await store.get_current_hosts_in_room(room_id)
for host in hosts:
hosts_and_states.setdefault(host, set()).update(states)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d8ff5289b5..4bf9a047a3 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -307,7 +307,11 @@ class ProfileHandler:
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
- server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+ host, port, media_id = parse_and_validate_mxc_uri(mxc)
+ if port is not None:
+ server_name = host + ":" + str(port)
+ else:
+ server_name = host
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
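
A small worked example of the host/port handling above, assuming `parse_and_validate_mxc_uri` returns the `(host, port, media_id)` triple it is unpacked into here.

```python
# Sketch of rebuilding a server_name from parsed MXC URI components.
from typing import Optional, Tuple


def rebuild_server_name(parsed: Tuple[str, Optional[int], str]) -> str:
    host, port, _media_id = parsed
    return host if port is None else f"{host}:{port}"


# mxc://example.com/abc123       -> "example.com"
print(rebuild_server_name(("example.com", None, "abc123")))
# mxc://example.com:8448/abc123  -> "example.com:8448"
print(rebuild_server_name(("example.com", 8448, "abc123")))
```
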
diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py
index 2599160bcc..1219672a59 100644
--- a/synapse/handlers/push_rules.py
+++ b/synapse/handlers/push_rules.py
@@ -16,14 +16,17 @@ from typing import TYPE_CHECKING, List, Optional, Union
import attr
from synapse.api.errors import SynapseError, UnrecognizedRequestError
-from synapse.push.baserules import BASE_RULE_IDS
from synapse.storage.push_rule import RuleNotFoundException
+from synapse.synapse_rust.push import get_base_rule_ids
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
+BASE_RULE_IDS = get_base_rule_ids()
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RuleSpec:
scope: str
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 43d2882b0a..ac01582442 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -70,7 +70,7 @@ class ReceiptsHandler:
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
- is_in_room = await self.event_auth_handler.check_host_in_room(
+ is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -91,13 +91,22 @@ class ReceiptsHandler:
)
continue
+ # Check if these receipts apply to a thread.
+ thread_id = None
+ data = user_values.get("data", {})
+ thread_id = data.get("thread_id")
+ # If the thread ID is invalid, consider it missing.
+ if not isinstance(thread_id, str):
+ thread_id = None
+
receipts.append(
ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=user_values["event_ids"],
- data=user_values.get("data", {}),
+ thread_id=thread_id,
+ data=data,
)
)
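
For context, a federation receipt EDU carrying a threaded read receipt would have content shaped roughly like this (per MSC3771; all identifiers are placeholders):

```python
# Illustrative federation receipt content for one room (placeholder identifiers).
edu_content = {
    "!room:example.com": {
        "m.read": {
            "@alice:example.com": {
                "event_ids": ["$latest_read_event"],
                "data": {
                    "ts": 1667304000000,
                    # A missing or non-string thread_id is treated as an
                    # unthreaded receipt by the code above.
                    "thread_id": "$thread_root_event_id",
                },
            },
        },
    },
}
```
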
@@ -114,6 +123,7 @@ class ReceiptsHandler:
receipt.receipt_type,
receipt.user_id,
receipt.event_ids,
+ receipt.thread_id,
receipt.data,
)
@@ -146,7 +156,12 @@ class ReceiptsHandler:
return True
async def received_client_receipt(
- self, room_id: str, receipt_type: str, user_id: str, event_id: str
+ self,
+ room_id: str,
+ receipt_type: str,
+ user_id: str,
+ event_id: str,
+ thread_id: Optional[str],
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
@@ -156,6 +171,7 @@ class ReceiptsHandler:
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
+ thread_id=thread_id,
data={"ts": int(self.clock.time_msec())},
)
@@ -241,7 +257,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
@@ -256,10 +272,9 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
room_ids, from_key=from_key, to_key=to_key
)
- if self.config.experimental.msc2285_enabled:
- events = ReceiptEventSource.filter_out_private_receipts(
- events, user.to_string()
- )
+ events = ReceiptEventSource.filter_out_private_receipts(
+ events, user.to_string()
+ )
return events, to_key
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index c77d181722..6307fa9c5d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -29,9 +29,16 @@ from synapse.api.constants import (
JoinRules,
LoginType,
)
-from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ ConsentNotGivenError,
+ InvalidClientTokenError,
+ SynapseError,
+)
from synapse.appservice import ApplicationService
from synapse.config.server import is_threepid_reserved
+from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@@ -180,10 +187,7 @@ class RegistrationHandler:
)
if guest_access_token:
user_data = await self.auth.get_user_by_access_token(guest_access_token)
- if (
- not user_data.is_guest
- or UserID.from_string(user_data.user_id).localpart != localpart
- ):
+ if not user_data.is_guest or user_data.user.localpart != localpart:
raise AuthError(
403,
"Cannot register taken user ID without valid guest "
@@ -217,6 +221,7 @@ class RegistrationHandler:
by_admin: bool = False,
user_agent_ips: Optional[List[Tuple[str, str]]] = None,
auth_provider_id: Optional[str] = None,
+ approved: bool = False,
) -> str:
"""Registers a new client on the server.
@@ -243,6 +248,8 @@ class RegistrationHandler:
user_agent_ips: Tuples of user-agents and IP addresses used
during the registration process.
auth_provider_id: The SSO IdP the user used, if any.
+ approved: True if the new user should be considered already
+ approved by an administrator.
Returns:
The registered user_id.
Raises:
@@ -304,6 +311,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
+ approved=approved,
)
profile = await self.store.get_profileinfo(localpart)
@@ -618,7 +626,7 @@ class RegistrationHandler:
user_id = user.to_string()
service = self.store.get_app_service_by_token(as_token)
if not service:
- raise AuthError(403, "Invalid application service token.")
+ raise InvalidClientTokenError()
if not service.is_interested_in_user(user_id):
raise SynapseError(
400,
@@ -692,6 +700,7 @@ class RegistrationHandler:
user_type: Optional[str] = None,
address: Optional[str] = None,
shadow_banned: bool = False,
+ approved: bool = False,
) -> None:
"""Register user in the datastore.
@@ -710,6 +719,7 @@ class RegistrationHandler:
api.constants.UserTypes, or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
+ approved: Whether to mark the user as approved by an administrator
"""
if self.hs.config.worker.worker_app:
await self._register_client(
@@ -723,6 +733,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
+ approved=approved,
)
else:
await self.store.register_user(
@@ -735,6 +746,7 @@ class RegistrationHandler:
admin=admin,
user_type=user_type,
shadow_banned=shadow_banned,
+ approved=approved,
)
# Only call the account validity module(s) on the main process, to avoid
@@ -830,6 +842,9 @@ class RegistrationHandler:
refresh_token = None
refresh_token_id = None
+ # This can only run on the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
registered_device_id = await self.device_handler.check_device_registered(
user_id,
device_id,
@@ -994,7 +1009,7 @@ class RegistrationHandler:
assert user_tuple
token_id = user_tuple.token_id
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="email",
@@ -1002,7 +1017,7 @@ class RegistrationHandler:
app_display_name="Email Notifications",
device_display_name=threepid["address"],
pushkey=threepid["address"],
- lang=None, # We don't know a user's language here
+ lang=None,
data={},
)
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 8f797e3ae9..e96f9999a8 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -11,16 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import enum
import logging
-from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional
import attr
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
-from synapse.storage.databases.main.relations import _RelatedEvent
-from synapse.types import JsonDict, Requester, StreamToken, UserID
+from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.opentracing import trace
+from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
+from synapse.streams.config import PaginationConfig
+from synapse.types import JsonDict, Requester, UserID
+from synapse.util.async_helpers import gather_results
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
@@ -30,6 +35,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+class ThreadsListInclude(str, enum.Enum):
+ """Valid values for the 'include' flag of /threads."""
+
+ all = "all"
+ participated = "participated"
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _ThreadAggregation:
# The latest event in the thread.
@@ -65,18 +77,17 @@ class RelationsHandler:
self._clock = hs.get_clock()
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
+ self._event_creation_handler = hs.get_event_creation_handler()
async def get_relations(
self,
requester: Requester,
event_id: str,
room_id: str,
+ pagin_config: PaginationConfig,
+ include_original_event: bool,
relation_type: Optional[str] = None,
event_type: Optional[str] = None,
- limit: int = 5,
- direction: str = "b",
- from_token: Optional[StreamToken] = None,
- to_token: Optional[StreamToken] = None,
) -> JsonDict:
"""Get related events of a event, ordered by topological ordering.
@@ -86,13 +97,10 @@ class RelationsHandler:
requester: The user requesting the relations.
event_id: Fetch events that relate to this event ID.
room_id: The room the event belongs to.
+ pagin_config: The pagination config rules to apply, if any.
+ include_original_event: Whether to include the parent event.
relation_type: Only fetch events with this relation type, if given.
event_type: Only fetch events with this event type, if given.
- limit: Only fetch the most recent `limit` events.
- direction: Whether to fetch the most recent first (`"b"`) or the
- oldest first (`"f"`).
- from_token: Fetch rows from the given token, or from the start if None.
- to_token: Fetch rows up to the given token, or up to the end if None.
Returns:
The pagination chunk.
@@ -102,7 +110,7 @@ class RelationsHandler:
# TODO Properly handle a user leaving a room.
(_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
- room_id, user_id, allow_departed_users=True
+ room_id, requester, allow_departed_users=True
)
# This gets the original event and checks that a) the event exists and
@@ -120,10 +128,10 @@ class RelationsHandler:
room_id=room_id,
relation_type=relation_type,
event_type=event_type,
- limit=limit,
- direction=direction,
- from_token=from_token,
- to_token=to_token,
+ limit=pagin_config.limit,
+ direction=pagin_config.direction,
+ from_token=pagin_config.from_token,
+ to_token=pagin_config.to_token,
)
events = await self._main_store.get_events_as_list(
@@ -137,113 +145,189 @@ class RelationsHandler:
is_peeking=(member_event_id is None),
)
- now = self._clock.time_msec()
- # Do not bundle aggregations when retrieving the original event because
- # we want the content before relations are applied to it.
- original_event = self._event_serializer.serialize_event(
- event, now, bundle_aggregations=None
- )
# The relations returned for the requested event do include their
# bundled aggregations.
aggregations = await self.get_bundled_aggregations(
events, requester.user.to_string()
)
- serialized_events = self._event_serializer.serialize_events(
- events, now, bundle_aggregations=aggregations
- )
- return_value = {
- "chunk": serialized_events,
- "original_event": original_event,
+ now = self._clock.time_msec()
+ return_value: JsonDict = {
+ "chunk": self._event_serializer.serialize_events(
+ events, now, bundle_aggregations=aggregations
+ ),
}
+ if include_original_event:
+ # Do not bundle aggregations when retrieving the original event because
+ # we want the content before relations are applied to it.
+ return_value["original_event"] = self._event_serializer.serialize_event(
+ event, now, bundle_aggregations=None
+ )
if next_token:
return_value["next_batch"] = await next_token.to_string(self._main_store)
- if from_token:
- return_value["prev_batch"] = await from_token.to_string(self._main_store)
+ if pagin_config.from_token:
+ return_value["prev_batch"] = await pagin_config.from_token.to_string(
+ self._main_store
+ )
return return_value
- async def get_relations_for_event(
+ async def redact_events_related_to(
self,
+ requester: Requester,
event_id: str,
- event: EventBase,
- room_id: str,
- relation_type: str,
- ignored_users: FrozenSet[str] = frozenset(),
- ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
- """Get a list of events which relate to an event, ordered by topological ordering.
+ initial_redaction_event: EventBase,
+ relation_types: List[str],
+ ) -> None:
+ """Redacts all events related to the given event ID with one of the given
+ relation types.
- Args:
- event_id: Fetch events that relate to this event ID.
- event: The matching EventBase to event_id.
- room_id: The room the event belongs to.
- relation_type: The type of relation.
- ignored_users: The users ignored by the requesting user.
+ This method is expected to be called when redacting the event referred to by
+ the given event ID.
- Returns:
- List of event IDs that match relations requested. The rows are of
- the form `{"event_id": "..."}`.
- """
+ If an event cannot be redacted (e.g. because of insufficient permissions), log
+ the error and try to redact the next one.
- # Call the underlying storage method, which is cached.
- related_events, next_token = await self._main_store.get_relations_for_event(
- event_id, event, room_id, relation_type, direction="f"
+ Args:
+ requester: The requester to redact events on behalf of.
+            event_id: The event ID whose relations to look up and redact.
+ initial_redaction_event: The redaction for the event referred to by
+ event_id.
+ relation_types: The types of relations to look for.
+
+ Raises:
+ ShadowBanError if the requester is shadow-banned
+ """
+ related_event_ids = (
+ await self._main_store.get_all_relations_for_event_with_types(
+ event_id, relation_types
+ )
)
- # Filter out ignored users and convert to the expected format.
- related_events = [
- event for event in related_events if event.sender not in ignored_users
- ]
-
- return related_events, next_token
+ for related_event_id in related_event_ids:
+ try:
+ await self._event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.Redaction,
+ "content": initial_redaction_event.content,
+ "room_id": initial_redaction_event.room_id,
+ "sender": requester.user.to_string(),
+ "redacts": related_event_id,
+ },
+ ratelimit=False,
+ )
+ except SynapseError as e:
+ logger.warning(
+ "Failed to redact event %s (related to event %s): %s",
+ related_event_id,
+ event_id,
+ e.msg,
+ )
- async def get_annotations_for_event(
- self,
- event_id: str,
- room_id: str,
- limit: int = 5,
- ignored_users: FrozenSet[str] = frozenset(),
- ) -> List[JsonDict]:
- """Get a list of annotations on the event, grouped by event type and
+ async def get_annotations_for_events(
+ self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
+ ) -> Dict[str, List[JsonDict]]:
+ """Get a list of annotations to the given events, grouped by event type and
aggregation key, sorted by count.
- This is used e.g. to get the what and how many reactions have happend
+        This is used e.g. to get which reactions, and how many, have happened
on an event.
Args:
- event_id: Fetch events that relate to this event ID.
- room_id: The room the event belongs to.
- limit: Only fetch the `limit` groups.
+ event_ids: Fetch events that relate to these event IDs.
ignored_users: The users ignored by the requesting user.
Returns:
- List of groups of annotations that match. Each row is a dict with
- `type`, `key` and `count` fields.
+ A map of event IDs to a list of groups of annotations that match.
+ Each entry is a dict with `type`, `key` and `count` fields.
"""
# Get the base results for all users.
- full_results = await self._main_store.get_aggregation_groups_for_event(
- event_id, room_id, limit
+ full_results = await self._main_store.get_aggregation_groups_for_events(
+ event_ids
)
+ # Avoid additional logic if there are no ignored users.
+ if not ignored_users:
+ return {
+ event_id: results
+ for event_id, results in full_results.items()
+ if results
+ }
+
# Then subtract off the results for any ignored users.
ignored_results = await self._main_store.get_aggregation_groups_for_users(
- event_id, room_id, limit, ignored_users
+ [event_id for event_id, results in full_results.items() if results],
+ ignored_users,
)
- filtered_results = []
- for result in full_results:
- key = (result["type"], result["key"])
- if key in ignored_results:
- result = result.copy()
- result["count"] -= ignored_results[key]
- if result["count"] <= 0:
- continue
- filtered_results.append(result)
+ filtered_results = {}
+ for event_id, results in full_results.items():
+ # If no annotations, skip.
+ if not results:
+ continue
+
+            # If there are no ignored results for this event, copy verbatim.
+ if event_id not in ignored_results:
+ filtered_results[event_id] = results
+ continue
+
+ # Otherwise, subtract out the ignored results.
+ event_ignored_results = ignored_results[event_id]
+ for result in results:
+ key = (result["type"], result["key"])
+ if key in event_ignored_results:
+ # Ensure to not modify the cache.
+ result = result.copy()
+ result["count"] -= event_ignored_results[key]
+ if result["count"] <= 0:
+ continue
+ filtered_results.setdefault(event_id, []).append(result)
return filtered_results
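
The shape of the returned map, with illustrative values, is roughly:

```python
# Illustrative return value of get_annotations_for_events().
annotations_by_event = {
    "$message_event_id": [
        {"type": "m.reaction", "key": "👍", "count": 3},
        {"type": "m.reaction", "key": "🎉", "count": 1},
    ],
    # Events with no (remaining) annotations are omitted entirely.
}
```
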
+ async def get_references_for_events(
+ self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
+ ) -> Dict[str, List[_RelatedEvent]]:
+ """Get a list of references to the given events.
+
+ Args:
+            event_ids: Fetch events that relate to these event IDs.
+ ignored_users: The users ignored by the requesting user.
+
+ Returns:
+            A map of event IDs to a list of related events.
+ """
+
+ related_events = await self._main_store.get_references_for_events(event_ids)
+
+ # Avoid additional logic if there are no ignored users.
+ if not ignored_users:
+ return {
+ event_id: results
+ for event_id, results in related_events.items()
+ if results
+ }
+
+ # Filter out ignored users.
+ results = {}
+ for event_id, events in related_events.items():
+ # If no references, skip.
+ if not events:
+ continue
+
+ # Filter ignored users out.
+ events = [event for event in events if event.sender not in ignored_users]
+ # If there are no events left, skip this event.
+ if not events:
+ continue
+
+ results[event_id] = events
+
+ return results
+
async def _get_threads_for_events(
self,
events_by_id: Dict[str, EventBase],
@@ -306,61 +390,69 @@ class RelationsHandler:
results = {}
for event_id, summary in summaries.items():
- if summary:
- thread_count, latest_thread_event = summary
-
- # Subtract off the count of any ignored users.
- for ignored_user in ignored_users:
- thread_count -= ignored_results.get((event_id, ignored_user), 0)
-
- # This is gnarly, but if the latest event is from an ignored user,
- # attempt to find one that isn't from an ignored user.
- if latest_thread_event.sender in ignored_users:
- room_id = latest_thread_event.room_id
-
- # If the root event is not found, something went wrong, do
- # not include a summary of the thread.
- event = await self._event_handler.get_event(user, room_id, event_id)
- if event is None:
- continue
+ # If no thread, skip.
+ if not summary:
+ continue
- potential_events, _ = await self.get_relations_for_event(
- event_id,
- event,
- room_id,
- RelationTypes.THREAD,
- ignored_users,
- )
+ thread_count, latest_thread_event = summary
- # If all found events are from ignored users, do not include
- # a summary of the thread.
- if not potential_events:
- continue
+ # Subtract off the count of any ignored users.
+ for ignored_user in ignored_users:
+ thread_count -= ignored_results.get((event_id, ignored_user), 0)
- # The *last* event returned is the one that is cared about.
- event = await self._event_handler.get_event(
- user, room_id, potential_events[-1].event_id
- )
- # It is unexpected that the event will not exist.
- if event is None:
- logger.warning(
- "Unable to fetch latest event in a thread with event ID: %s",
- potential_events[-1].event_id,
- )
- continue
- latest_thread_event = event
-
- results[event_id] = _ThreadAggregation(
- latest_event=latest_thread_event,
- count=thread_count,
- # If there's a thread summary it must also exist in the
- # participated dictionary.
- current_user_participated=events_by_id[event_id].sender == user_id
- or participated[event_id],
+ # This is gnarly, but if the latest event is from an ignored user,
+ # attempt to find one that isn't from an ignored user.
+ if latest_thread_event.sender in ignored_users:
+ room_id = latest_thread_event.room_id
+
+ # If the root event is not found, something went wrong, do
+ # not include a summary of the thread.
+ event = await self._event_handler.get_event(user, room_id, event_id)
+ if event is None:
+ continue
+
+ # Attempt to find another event to use as the latest event.
+ potential_events, _ = await self._main_store.get_relations_for_event(
+ event_id, event, room_id, RelationTypes.THREAD, direction="f"
)
+ # Filter out ignored users.
+ potential_events = [
+ event
+ for event in potential_events
+ if event.sender not in ignored_users
+ ]
+
+ # If all found events are from ignored users, do not include
+ # a summary of the thread.
+ if not potential_events:
+ continue
+
+ # The *last* event returned is the one that is cared about.
+ event = await self._event_handler.get_event(
+ user, room_id, potential_events[-1].event_id
+ )
+ # It is unexpected that the event will not exist.
+ if event is None:
+ logger.warning(
+ "Unable to fetch latest event in a thread with event ID: %s",
+ potential_events[-1].event_id,
+ )
+ continue
+ latest_thread_event = event
+
+ results[event_id] = _ThreadAggregation(
+ latest_event=latest_thread_event,
+ count=thread_count,
+ # If there's a thread summary it must also exist in the
+ # participated dictionary.
+ current_user_participated=events_by_id[event_id].sender == user_id
+ or participated[event_id],
+ )
+
return results
+ @trace
async def get_bundled_aggregations(
self, events: Iterable[EventBase], user_id: str
) -> Dict[str, BundledAggregations]:
@@ -435,48 +527,131 @@ class RelationsHandler:
# (as that is what makes it part of the thread).
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
- # Fetch other relations per event.
- for event in events_by_id.values():
- # Fetch any annotations (ie, reactions) to bundle with this event.
- annotations = await self.get_annotations_for_event(
- event.event_id, event.room_id, ignored_users=ignored_users
+ async def _fetch_annotations() -> None:
+ """Fetch any annotations (ie, reactions) to bundle with this event."""
+ annotations_by_event_id = await self.get_annotations_for_events(
+ events_by_id.keys(), ignored_users=ignored_users
)
- if annotations:
- results.setdefault(
- event.event_id, BundledAggregations()
- ).annotations = {"chunk": annotations}
-
- # Fetch any references to bundle with this event.
- references, next_token = await self.get_relations_for_event(
- event.event_id,
- event,
- event.room_id,
- RelationTypes.REFERENCE,
- ignored_users=ignored_users,
+ for event_id, annotations in annotations_by_event_id.items():
+ if annotations:
+ results.setdefault(event_id, BundledAggregations()).annotations = {
+ "chunk": annotations
+ }
+
+ async def _fetch_references() -> None:
+ """Fetch any references to bundle with this event."""
+ references_by_event_id = await self.get_references_for_events(
+ events_by_id.keys(), ignored_users=ignored_users
+ )
+ for event_id, references in references_by_event_id.items():
+ if references:
+ results.setdefault(event_id, BundledAggregations()).references = {
+ "chunk": [{"event_id": ev.event_id} for ev in references]
+ }
+
+ async def _fetch_edits() -> None:
+ """
+ Fetch any edits (but not for redacted events).
+
+ Note that there is no use in limiting edits by ignored users since the
+ parent event should be ignored in the first place if the user is ignored.
+ """
+ edits = await self._main_store.get_applicable_edits(
+ [
+ event_id
+ for event_id, event in events_by_id.items()
+ if not event.internal_metadata.is_redacted()
+ ]
+ )
+ for event_id, edit in edits.items():
+ results.setdefault(event_id, BundledAggregations()).replace = edit
+
+ # Parallelize the calls for annotations, references, and edits since they
+ # are unrelated.
+ await make_deferred_yieldable(
+ gather_results(
+ (
+ run_in_background(_fetch_annotations),
+ run_in_background(_fetch_references),
+ run_in_background(_fetch_edits),
+ )
)
- if references:
- aggregations = results.setdefault(event.event_id, BundledAggregations())
- aggregations.references = {
- "chunk": [{"event_id": ev.event_id} for ev in references]
- }
-
- if next_token:
- aggregations.references["next_batch"] = await next_token.to_string(
- self._main_store
- )
-
- # Fetch any edits (but not for redacted events).
- #
- # Note that there is no use in limiting edits by ignored users since the
- # parent event should be ignored in the first place if the user is ignored.
- edits = await self._main_store.get_applicable_edits(
- [
- event_id
- for event_id, event in events_by_id.items()
- if not event.internal_metadata.is_redacted()
- ]
)
- for event_id, edit in edits.items():
- results.setdefault(event_id, BundledAggregations()).replace = edit
return results
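
The `run_in_background` / `gather_results` combination above is the usual Synapse idiom for running independent coroutines concurrently while keeping log contexts intact; a stripped-down sketch with placeholder tasks:

```python
# Minimal sketch of the run_in_background + gather_results idiom (placeholder tasks).
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util.async_helpers import gather_results


async def fetch_everything() -> None:
    async def _task_a() -> None:
        ...  # e.g. fetch annotations

    async def _task_b() -> None:
        ...  # e.g. fetch references

    # Each task runs in its own log context; gather_results waits for all of them
    # and make_deferred_yieldable restores the caller's context afterwards.
    await make_deferred_yieldable(
        gather_results(
            (
                run_in_background(_task_a),
                run_in_background(_task_b),
            )
        )
    )
```
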
+
+ async def get_threads(
+ self,
+ requester: Requester,
+ room_id: str,
+ include: ThreadsListInclude,
+ limit: int = 5,
+ from_token: Optional[ThreadsNextBatch] = None,
+ ) -> JsonDict:
+        """Get the threads in a room, ordered by their latest reply.
+
+ Args:
+ requester: The user requesting the relations.
+ room_id: The room the event belongs to.
+ include: One of "all" or "participated" to indicate which threads should
+ be returned.
+ limit: Only fetch the most recent `limit` events.
+ from_token: Fetch rows from the given token, or from the start if None.
+
+ Returns:
+ The pagination chunk.
+ """
+
+ user_id = requester.user.to_string()
+
+ # TODO Properly handle a user leaving a room.
+ (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
+ room_id, requester, allow_departed_users=True
+ )
+
+ # Note that ignored users are not passed into get_threads
+ # below. Ignored users are handled in filter_events_for_client (and by
+ # not passing them in here we should get a better cache hit rate).
+ thread_roots, next_batch = await self._main_store.get_threads(
+ room_id=room_id, limit=limit, from_token=from_token
+ )
+
+ events = await self._main_store.get_events_as_list(thread_roots)
+
+ if include == ThreadsListInclude.participated:
+ # Pre-seed thread participation with whether the requester sent the event.
+ participated = {event.event_id: event.sender == user_id for event in events}
+ # For events the requester did not send, check the database for whether
+ # the requester sent a threaded reply.
+ participated.update(
+ await self._main_store.get_threads_participated(
+ [eid for eid, p in participated.items() if not p],
+ user_id,
+ )
+ )
+
+ # Limit the returned threads to those the user has participated in.
+ events = [event for event in events if participated[event.event_id]]
+
+ events = await filter_events_for_client(
+ self._storage_controllers,
+ user_id,
+ events,
+ is_peeking=(member_event_id is None),
+ )
+
+ aggregations = await self.get_bundled_aggregations(
+ events, requester.user.to_string()
+ )
+
+ now = self._clock.time_msec()
+ serialized_events = self._event_serializer.serialize_events(
+ events, now, bundle_aggregations=aggregations
+ )
+
+ return_value: JsonDict = {"chunk": serialized_events}
+
+ if next_batch:
+ return_value["next_batch"] = str(next_batch)
+
+ return return_value
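
The value handed back to the threads list endpoint therefore looks roughly like the following; the `next_batch` token is opaque to clients and the values are placeholders.

```python
# Illustrative return value of get_threads().
threads_response = {
    "chunk": [
        # serialized thread-root events, each carrying its bundled aggregations
        # (including the m.thread summary)
    ],
    # only present when there are more threads to page through
    "next_batch": "opaque-pagination-token",
}
```
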
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 55395457c3..6dcfd86fdf 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -19,6 +19,7 @@ import math
import random
import string
from collections import OrderedDict
+from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Any,
@@ -48,7 +49,6 @@ from synapse.api.constants import (
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
@@ -59,8 +59,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
-from synapse.federation.federation_client import InvalidResponseError
-from synapse.handlers.federation import get_domains_from_state
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
@@ -229,9 +227,7 @@ class RoomCreationHandler:
},
)
validate_event_for_room_version(tombstone_event)
- await self._event_auth_handler.check_auth_rules_from_context(
- tombstone_event, tombstone_context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(tombstone_event)
# Upgrade the room
#
@@ -301,8 +297,7 @@ class RoomCreationHandler:
# now send the tombstone
await self.event_creation_handler.handle_new_client_event(
requester=requester,
- event=tombstone_event,
- context=tombstone_context,
+ events_and_context=[(tombstone_event, tombstone_context)],
)
state_filter = StateFilter.from_types(
@@ -562,7 +557,6 @@ class RoomCreationHandler:
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
- ratelimit=False,
)
# Transfer membership events
@@ -705,8 +699,8 @@ class RoomCreationHandler:
was, requested, `room_alias`. Secondly, the stream_id of the
last persisted event.
Raises:
- SynapseError if the room ID couldn't be stored, or something went
- horribly wrong.
+ SynapseError if the room ID couldn't be stored, 3pid invitation config
+ validation failed, or something went horribly wrong.
ResourceLimitError if server is blocked to some resource being
exceeded
"""
@@ -716,12 +710,12 @@ class RoomCreationHandler:
if (
self._server_notices_mxid is not None
- and requester.user.to_string() == self._server_notices_mxid
+ and user_id == self._server_notices_mxid
):
# allow the server notices mxid to create rooms
is_requester_admin = True
else:
- is_requester_admin = await self.auth.is_server_admin(requester.user)
+ is_requester_admin = await self.auth.is_server_admin(requester)
# Let the third party rules modify the room creation config if needed, or abort
# the room creation entirely with an exception.
@@ -732,6 +726,19 @@ class RoomCreationHandler:
invite_3pid_list = config.get("invite_3pid", [])
invite_list = config.get("invite", [])
+ # validate each entry for correctness
+ for invite_3pid in invite_3pid_list:
+ if not all(
+ key in invite_3pid
+ for key in ("medium", "address", "id_server", "id_access_token")
+ ):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "all of `medium`, `address`, `id_server` and `id_access_token` "
+ "are required when making a 3pid invite",
+ Codes.MISSING_PARAM,
+ )
+
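
An `invite_3pid` entry that passes the new validation must carry all four keys; for example (placeholder values):

```python
# Example of a well-formed entry in the createRoom `invite_3pid` list.
invite_3pid = {
    "medium": "email",
    "address": "friend@example.com",
    "id_server": "identity.example.com",
    "id_access_token": "identity-server-token",  # now required rather than optional
}
```
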
if not is_requester_admin:
spam_check = await self.spam_checker.user_may_create_room(user_id)
if spam_check != NOT_SPAM:
@@ -743,6 +750,10 @@ class RoomCreationHandler:
)
if ratelimit:
+ # Rate limit once in advance, but don't rate limit the individual
+ # events in the room — room creation isn't atomic and it's very
+ # janky if half the events in the initial state don't make it because
+ # of rate limiting.
await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
@@ -903,7 +914,6 @@ class RoomCreationHandler:
room_alias=room_alias,
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
- ratelimit=ratelimit,
)
if "name" in config:
@@ -979,7 +989,7 @@ class RoomCreationHandler:
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
- id_access_token = invite_3pid.get("id_access_token") # optional
+ id_access_token = invite_3pid["id_access_token"]
address = invite_3pid["address"]
medium = invite_3pid["medium"]
# Note that do_3pid_invite can raise a ShadowBanError, but this was
@@ -1027,26 +1037,36 @@ class RoomCreationHandler:
room_alias: Optional[RoomAlias] = None,
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
- ratelimit: bool = True,
) -> Tuple[int, str, int]:
- """Sends the initial events into a new room.
+        """Sends the initial events into a new room. The room creation, membership and
+        power level events are sent into the room sequentially; the remaining state
+        events are then created and persisted to the DB as a single batch.
`power_level_content_override` doesn't apply when initial state has
power level state event content.
+ Rate limiting should already have been applied by this point.
+
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
"""
creator_id = creator.user.to_string()
-
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
-
depth = 1
- last_sent_event_id: Optional[str] = None
- def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
+        # the ID of the most recently created event, as a one-element prev_events list
+ prev_event: List[str] = []
+        # a map of (event type, state key) -> event_id. We collect these mappings as
+        # events are created (but not yet persisted to the db) so that the state of
+        # subsequently created events can be determined (as this info can't be pulled
+        # from the db yet)
+ state_map: MutableStateMap[str] = {}
+ # current_state_group of last event created. Used for computing event context of
+ # events to be batched
+ current_state_group = None
+
+ def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content}
e.update(event_keys)
@@ -1054,32 +1074,44 @@ class RoomCreationHandler:
return e
- async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
- nonlocal last_sent_event_id
+ async def create_event(
+ etype: str,
+ content: JsonDict,
+ for_batch: bool,
+ **kwargs: Any,
+ ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
+ """
+ Creates an event and associated event context.
+ Args:
+ etype: the type of event to be created
+ content: content of the event
+                for_batch: whether the event is being created for batch persisting. If
+                    True, the event is created with the enclosing prev_event list as its
+                    prev_event_ids, and its event context is computed from state_map and
+                    current_state_group, so both of those must be populated in that case.
+                    The resulting event and context are suitable for being batched up
+                    and bulk persisted to the database with other similarly created
+                    events.
+ """
nonlocal depth
+ nonlocal prev_event
- event = create(etype, content, **kwargs)
- logger.debug("Sending %s in new room", etype)
- # Allow these events to be sent even if the user is shadow-banned to
- # allow the room creation to complete.
- (
- sent_event,
- last_stream_id,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ event_dict = create_event_dict(etype, content, **kwargs)
+
+ new_event, new_context = await self.event_creation_handler.create_event(
creator,
- event,
- ratelimit=False,
- ignore_shadow_ban=True,
- # Note: we don't pass state_event_ids here because this triggers
- # an additional query per event to look them up from the events table.
- prev_event_ids=[last_sent_event_id] if last_sent_event_id else [],
+ event_dict,
+ prev_event_ids=prev_event,
depth=depth,
+ state_map=state_map,
+ for_batch=for_batch,
+ current_state_group=current_state_group,
)
-
- last_sent_event_id = sent_event.event_id
depth += 1
+ prev_event = [new_event.event_id]
+ state_map[(new_event.type, new_event.state_key)] = new_event.event_id
- return last_stream_id
+ return new_event, new_context
try:
config = self._presets_dict[preset_config]
@@ -1089,31 +1121,55 @@ class RoomCreationHandler:
)
creation_content.update({"creator": creator_id})
- await send(etype=EventTypes.Create, content=creation_content)
+ creation_event, creation_context = await create_event(
+ EventTypes.Create, creation_content, False
+ )
logger.debug("Sending %s in new room", EventTypes.Member)
- # Room create event must exist at this point
- assert last_sent_event_id is not None
+ ev = await self.event_creation_handler.handle_new_client_event(
+ requester=creator,
+ events_and_context=[(creation_event, creation_context)],
+ ratelimit=False,
+ ignore_shadow_ban=True,
+ )
+ last_sent_event_id = ev.event_id
+
member_event_id, _ = await self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
- ratelimit=ratelimit,
+ ratelimit=False,
content=creator_join_profile,
new_room=True,
prev_event_ids=[last_sent_event_id],
depth=depth,
)
- last_sent_event_id = member_event_id
+ prev_event = [member_event_id]
+
+ # update the depth and state map here as the membership event has been created
+ # through a different code path
+ depth += 1
+ state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
+
+ # we need the state group of the membership event as it is the current state group
+ event_to_state = (
+ await self._storage_controllers.state.get_state_group_for_events(
+ [member_event_id]
+ )
+ )
+ current_state_group = event_to_state[member_event_id]
+ events_to_send = []
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
- last_sent_stream_id = await send(
- etype=EventTypes.PowerLevels, content=pl_content
+ power_event, power_context = await create_event(
+ EventTypes.PowerLevels, pl_content, True
)
+ current_state_group = power_context._state_group
+ events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
@@ -1156,48 +1212,73 @@ class RoomCreationHandler:
# apply those.
if power_level_content_override:
power_level_content.update(power_level_content_override)
-
- last_sent_stream_id = await send(
- etype=EventTypes.PowerLevels, content=power_level_content
+ pl_event, pl_context = await create_event(
+ EventTypes.PowerLevels,
+ power_level_content,
+ True,
)
+ current_state_group = pl_context._state_group
+ events_to_send.append((pl_event, pl_context))
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.CanonicalAlias,
- content={"alias": room_alias.to_string()},
+ room_alias_event, room_alias_context = await create_event(
+ EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
)
+ current_state_group = room_alias_context._state_group
+ events_to_send.append((room_alias_event, room_alias_context))
if (EventTypes.JoinRules, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}
+ join_rules_event, join_rules_context = await create_event(
+ EventTypes.JoinRules,
+ {"join_rule": config["join_rules"]},
+ True,
)
+ current_state_group = join_rules_context._state_group
+ events_to_send.append((join_rules_event, join_rules_context))
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.RoomHistoryVisibility,
- content={"history_visibility": config["history_visibility"]},
+ visibility_event, visibility_context = await create_event(
+ EventTypes.RoomHistoryVisibility,
+ {"history_visibility": config["history_visibility"]},
+ True,
)
+ current_state_group = visibility_context._state_group
+ events_to_send.append((visibility_event, visibility_context))
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
- last_sent_stream_id = await send(
- etype=EventTypes.GuestAccess,
- content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
+ guest_access_event, guest_access_context = await create_event(
+ EventTypes.GuestAccess,
+ {EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
+ True,
)
+ current_state_group = guest_access_context._state_group
+ events_to_send.append((guest_access_event, guest_access_context))
for (etype, state_key), content in initial_state.items():
- last_sent_stream_id = await send(
- etype=etype, state_key=state_key, content=content
+ event, context = await create_event(
+ etype, content, True, state_key=state_key
)
+ current_state_group = context._state_group
+ events_to_send.append((event, context))
if config["encrypted"]:
- last_sent_stream_id = await send(
- etype=EventTypes.RoomEncryption,
+ encryption_event, encryption_context = await create_event(
+ EventTypes.RoomEncryption,
+ {"algorithm": RoomEncryptionAlgorithms.DEFAULT},
+ True,
state_key="",
- content={"algorithm": RoomEncryptionAlgorithms.DEFAULT},
)
+ events_to_send.append((encryption_event, encryption_context))
- return last_sent_stream_id, last_sent_event_id, depth
+ last_event = await self.event_creation_handler.handle_new_client_event(
+ creator,
+ events_to_send,
+ ignore_shadow_ban=True,
+ ratelimit=False,
+ )
+ assert last_event.internal_metadata.stream_ordering is not None
+ return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
def _generate_room_id(self) -> str:
"""Generates a random room ID.
@@ -1279,13 +1360,16 @@ class RoomContextHandler:
"""
user = requester.user
if use_admin_priviledge:
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
before_limit = math.floor(limit / 2.0)
after_limit = limit - before_limit
- users = await self.store.get_users_in_room(room_id)
- is_peeking = user.to_string() not in users
+ is_user_in_room = await self.store.check_local_user_in_room(
+ user_id=user.to_string(), room_id=room_id
+ )
+ # The user is peeking if they aren't in the room already
+ is_peeking = not is_user_in_room
async def filter_evts(events: List[EventBase]) -> List[EventBase]:
if use_admin_priviledge:
@@ -1367,7 +1451,7 @@ class RoomContextHandler:
events_before=events_before,
event=event,
events_after=events_after,
- state=await filter_evts(state_events),
+ state=state_events,
aggregations=aggregations,
start=await token.copy_and_replace(
StreamKeyType.ROOM, results.start
@@ -1413,7 +1497,12 @@ class TimestampLookupHandler:
Raises:
SynapseError if unable to find any event locally in the given direction
"""
-
+ logger.debug(
+ "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
+ room_id,
+ timestamp,
+ direction,
+ )
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
@@ -1459,90 +1548,60 @@ class TimestampLookupHandler:
timestamp,
)
- # Find other homeservers from the given state in the room
- curr_state = await self._storage_controllers.state.get_current_state(
- room_id
+ likely_domains = (
+ await self._storage_controllers.state.get_current_hosts_in_room_ordered(
+ room_id
+ )
)
- curr_domains = get_domains_from_state(curr_state)
- likely_domains = [
- domain for domain, depth in curr_domains if domain != self.server_name
- ]
- # Loop through each homeserver candidate until we get a succesful response
- for domain in likely_domains:
- try:
- remote_response = await self.federation_client.timestamp_to_event(
- domain, room_id, timestamp, direction
- )
- logger.debug(
- "get_event_for_timestamp: response from domain(%s)=%s",
- domain,
- remote_response,
- )
+ remote_response = await self.federation_client.timestamp_to_event(
+ destinations=likely_domains,
+ room_id=room_id,
+ timestamp=timestamp,
+ direction=direction,
+ )
+ if remote_response is not None:
+ logger.debug(
+ "get_event_for_timestamp: remote_response=%s",
+ remote_response,
+ )
- remote_event_id = remote_response.event_id
- remote_origin_server_ts = remote_response.origin_server_ts
-
- # Backfill this event so we can get a pagination token for
- # it with `/context` and paginate `/messages` from this
- # point.
- #
- # TODO: The requested timestamp may lie in a part of the
- # event graph that the remote server *also* didn't have,
- # in which case they will have returned another event
- # which may be nowhere near the requested timestamp. In
- # the future, we may need to reconcile that gap and ask
- # other homeservers, and/or extend `/timestamp_to_event`
- # to return events on *both* sides of the timestamp to
- # help reconcile the gap faster.
- remote_event = (
- await self.federation_event_handler.backfill_event_id(
- domain, room_id, remote_event_id
- )
- )
+ remote_event_id = remote_response.event_id
+ remote_origin_server_ts = remote_response.origin_server_ts
- # XXX: When we see that the remote server is not trustworthy,
- # maybe we should not ask them first in the future.
- if remote_origin_server_ts != remote_event.origin_server_ts:
- logger.info(
- "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
- domain,
- remote_event_id,
- remote_origin_server_ts,
- remote_event.origin_server_ts,
- )
-
- # Only return the remote event if it's closer than the local event
- if not local_event or (
- abs(remote_event.origin_server_ts - timestamp)
- < abs(local_event.origin_server_ts - timestamp)
- ):
- logger.info(
- "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
- remote_event_id,
- remote_event.origin_server_ts,
- timestamp,
- local_event.event_id if local_event else None,
- local_event.origin_server_ts if local_event else None,
- )
- return remote_event_id, remote_origin_server_ts
- except (HttpResponseException, InvalidResponseError) as ex:
- # Let's not put a high priority on some other homeserver
- # failing to respond or giving a random response
- logger.debug(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
- domain,
- type(ex).__name__,
- ex,
- ex.args,
+ # Backfill this event so we can get a pagination token for
+ # it with `/context` and paginate `/messages` from this
+ # point.
+ pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
+ likely_domains, room_id, remote_event_id
+ )
+ remote_event = pulled_pdu_info.pdu
+
+ # XXX: When we see that the remote server is not trustworthy,
+ # maybe we should not ask them first in the future.
+ if remote_origin_server_ts != remote_event.origin_server_ts:
+ logger.info(
+ "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
+ pulled_pdu_info.pull_origin,
+ remote_event_id,
+ remote_origin_server_ts,
+ remote_event.origin_server_ts,
)
- except Exception:
- # But we do want to see some exceptions in our code
- logger.warning(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
- domain,
- exc_info=True,
+
+ # Only return the remote event if it's closer than the local event
+ if not local_event or (
+ abs(remote_event.origin_server_ts - timestamp)
+ < abs(local_event.origin_server_ts - timestamp)
+ ):
+ logger.info(
+ "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
+ remote_event_id,
+ remote_event.origin_server_ts,
+ timestamp,
+ local_event.event_id if local_event else None,
+ local_event.origin_server_ts if local_event else None,
)
+ return remote_event_id, remote_origin_server_ts
# To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
@@ -1565,7 +1624,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
self,
user: UserID,
from_key: RoomStreamToken,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
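
The rewritten timestamp lookup above asks the ordered list of likely remote hosts in a single federation call and then keeps whichever candidate, local or remote, lies closer to the requested timestamp. A small self-contained sketch of that comparison rule, with event shapes simplified to `(event_id, origin_server_ts)` tuples:

```python
from typing import Optional, Tuple

def closest_event(
    timestamp: int,
    local: Optional[Tuple[str, int]],
    remote: Optional[Tuple[str, int]],
) -> Optional[Tuple[str, int]]:
    if remote is None:
        return local
    if local is None:
        return remote
    # Prefer the remote event only when it is strictly closer to the timestamp.
    if abs(remote[1] - timestamp) < abs(local[1] - timestamp):
        return remote
    return local

assert closest_event(1000, ("$local", 900), ("$remote", 990)) == ("$remote", 990)
assert closest_event(1000, ("$local", 999), None) == ("$local", 999)
```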
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
index 1414e575d6..411a6fb22f 100644
--- a/synapse/handlers/room_batch.py
+++ b/synapse/handlers/room_batch.py
@@ -379,8 +379,7 @@ class RoomBatchHandler:
await self.create_requester_for_user_id_from_app_service(
event.sender, app_service_requester.app_service
),
- event=event,
- context=context,
+ events_and_context=[(event, context)],
)
return event_ids
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 520c52e013..6ad2b38b8f 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -32,6 +32,7 @@ from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
+from synapse.logging import opentracing
from synapse.module_api import NOT_SPAM
from synapse.storage.state import StateFilter
from synapse.types import (
@@ -178,7 +179,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"""Try and join a room that this server is not in
Args:
- requester
+ requester: The user making the request, according to the access token.
remote_room_hosts: List of servers that can be used to join via.
room_id: Room that we are trying to join
user: User who is trying to join
@@ -321,6 +322,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
@@ -360,6 +362,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
Tuple of event ID and stream ordering position
@@ -398,6 +402,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
+ "origin_server_ts": origin_server_ts,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
@@ -428,14 +433,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
await self._join_rate_per_room_limiter.ratelimit(
requester, key=room_id, update=False
)
-
- result_event = await self.event_creation_handler.handle_new_client_event(
- requester,
- event,
- context,
- extra_users=[target],
- ratelimit=ratelimit,
- )
+ with opentracing.start_active_span("handle_new_client_event"):
+ result_event = await self.event_creation_handler.handle_new_client_event(
+ requester,
+ events_and_context=[(event, context)],
+ extra_users=[target],
+ ratelimit=ratelimit,
+ )
if event.membership == Membership.LEAVE:
if prev_member_event_id:
@@ -504,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
@@ -542,6 +547,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
@@ -564,25 +571,27 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# by application services), and then by room ID.
async with self.member_as_limiter.queue(as_id):
async with self.member_linearizer.queue(key):
- result = await self.update_membership_locked(
- requester,
- target,
- room_id,
- action,
- txn_id=txn_id,
- remote_room_hosts=remote_room_hosts,
- third_party_signed=third_party_signed,
- ratelimit=ratelimit,
- content=content,
- new_room=new_room,
- require_consent=require_consent,
- outlier=outlier,
- historical=historical,
- allow_no_prev_events=allow_no_prev_events,
- prev_event_ids=prev_event_ids,
- state_event_ids=state_event_ids,
- depth=depth,
- )
+ with opentracing.start_active_span("update_membership_locked"):
+ result = await self.update_membership_locked(
+ requester,
+ target,
+ room_id,
+ action,
+ txn_id=txn_id,
+ remote_room_hosts=remote_room_hosts,
+ third_party_signed=third_party_signed,
+ ratelimit=ratelimit,
+ content=content,
+ new_room=new_room,
+ require_consent=require_consent,
+ outlier=outlier,
+ historical=historical,
+ allow_no_prev_events=allow_no_prev_events,
+ prev_event_ids=prev_event_ids,
+ state_event_ids=state_event_ids,
+ depth=depth,
+ origin_server_ts=origin_server_ts,
+ )
return result
@@ -605,6 +614,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
+ origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
@@ -645,10 +655,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ origin_server_ts: The origin_server_ts to use if a new event is created. Uses
+ the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
"""
+
content_specified = bool(content)
if content is None:
content = {}
@@ -686,7 +699,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
errcode=Codes.BAD_JSON,
)
- if "avatar_url" in content:
+ if "avatar_url" in content and content.get("avatar_url") is not None:
if not await self.profile_handler.check_avatar_size_and_mime_type(
content["avatar_url"],
):
@@ -741,7 +754,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
is_requester_admin = True
else:
- is_requester_admin = await self.auth.is_server_admin(requester.user)
+ is_requester_admin = await self.auth.is_server_admin(requester)
if not is_requester_admin:
if self.config.server.block_non_admin_invites:
@@ -783,6 +796,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent=require_consent,
outlier=outlier,
historical=historical,
+ origin_server_ts=origin_server_ts,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@@ -834,7 +848,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
old_membership == Membership.INVITE
and effective_membership_state == Membership.LEAVE
):
- is_blocked = await self._is_server_notice_room(room_id)
+ is_blocked = await self.store.is_server_notice_room(room_id)
if is_blocked:
raise SynapseError(
HTTPStatus.FORBIDDEN,
@@ -865,7 +879,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
bypass_spam_checker = True
else:
- bypass_spam_checker = await self.auth.is_server_admin(requester.user)
+ bypass_spam_checker = await self.auth.is_server_admin(requester)
inviter = await self._get_inviter(target.to_string(), room_id)
if (
@@ -1028,6 +1042,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content=content,
require_consent=require_consent,
outlier=outlier,
+ origin_server_ts=origin_server_ts,
)
async def _should_perform_remote_join(
@@ -1148,8 +1163,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
- users = await self.store.get_users_in_room(old_room_id)
- await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
+ local_users = await self.store.get_local_users_in_room(old_room_id)
+ await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
@@ -1249,7 +1264,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
- requester, event, context, extra_users=[target_user], ratelimit=ratelimit
+ requester,
+ events_and_context=[(event, context)],
+ extra_users=[target_user],
+ ratelimit=ratelimit,
)
prev_member_event_id = prev_state_ids.get(
@@ -1379,7 +1397,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
id_server: str,
requester: Requester,
txn_id: Optional[str],
- id_access_token: Optional[str] = None,
+ id_access_token: str,
prev_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
) -> Tuple[str, int]:
@@ -1394,7 +1412,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
- id_access_token: The optional identity server access token.
+ id_access_token: Identity server access token.
depth: Override the depth used to order the event in the DAG.
prev_event_ids: The event IDs to use as the prev events
Should normally be set to None, which will cause the depth to be calculated
@@ -1407,7 +1425,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
ShadowBanError if the requester has been shadow-banned.
"""
if self.config.server.block_non_admin_invites:
- is_requester_admin = await self.auth.is_server_admin(requester.user)
+ is_requester_admin = await self.auth.is_server_admin(requester)
if not is_requester_admin:
raise SynapseError(
403, "Invites have been disabled on this server", Codes.FORBIDDEN
@@ -1491,7 +1509,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id: str,
user: UserID,
txn_id: Optional[str],
- id_access_token: Optional[str] = None,
+ id_access_token: str,
prev_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
@@ -1614,12 +1632,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
return False
- async def _is_server_notice_room(self, room_id: str) -> bool:
- if self._server_notices_mxid is None:
- return False
- user_ids = await self.store.get_users_in_room(room_id)
- return self._server_notices_mxid in user_ids
-
class RoomMemberMasterHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
@@ -1690,7 +1702,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
check_complexity
and self.hs.config.server.limit_remote_rooms.admins_can_join
):
- check_complexity = not await self.auth.is_server_admin(user)
+ check_complexity = not await self.store.is_server_admin(user)
if check_complexity:
# Fetch the room complexity
@@ -1863,8 +1875,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
- event,
- context,
+ events_and_context=[(event, context)],
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
@@ -1920,8 +1931,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
- if membership:
- await self.store.forget(user_id, room_id)
+ # In the normal case this call is only required if `membership` is not `None`.
+ # But: after the last member has left the room, the background update
+ # `_background_remove_left_rooms` deletes the rows for this room from the
+ # `current_state_events` table, so `get_current_state_events` returns `None`.
+ await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index ebd445adca..8d08625237 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -609,7 +609,7 @@ class RoomSummaryHandler:
# If this is a request over federation, check if the host is in the room or
# has a user who could join the room.
elif origin:
- if await self._event_auth_handler.check_host_in_room(
+ if await self._event_auth_handler.is_host_in_room(
room_id, origin
) or await self._store.is_host_invited(room_id, origin):
return True
@@ -624,9 +624,7 @@ class RoomSummaryHandler:
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
for space_id in allowed_rooms:
- if await self._event_auth_handler.check_host_in_room(
- space_id, origin
- ):
+ if await self._event_auth_handler.is_host_in_room(space_id, origin):
return True
logger.info(
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 9602f0d0bb..874860d461 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -441,7 +441,7 @@ class DefaultSamlMappingProvider:
client_redirect_url: where the client wants to redirect to
Returns:
- dict: A dict containing new user attributes. Possible keys:
+ A dict containing new user attributes. Possible keys:
* mxid_localpart (str): Required. The localpart of the user's mxid
* displayname (str): The displayname of the user
* emails (list[str]): Any emails for the user
@@ -483,7 +483,7 @@ class DefaultSamlMappingProvider:
Args:
config: A dictionary containing configuration options for this provider
Returns:
- SamlConfig: A custom config object for this module
+ A custom config object for this module
"""
# Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid")
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
index e2844799e8..804cc6e81e 100644
--- a/synapse/handlers/send_email.py
+++ b/synapse/handlers/send_email.py
@@ -187,6 +187,19 @@ class SendEmailHandler:
multipart_msg["To"] = email_address
multipart_msg["Date"] = email.utils.formatdate()
multipart_msg["Message-ID"] = email.utils.make_msgid()
+ # Discourage automatic responses to Synapse's emails.
+ # Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted"
+ # header is present with any value other than "no". See
+ # https://www.rfc-editor.org/rfc/rfc3834.html#section-5.1
+ multipart_msg["Auto-Submitted"] = "auto-generated"
+ # Also include a Microsoft-Exchange specific header:
+ # https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxcmail/ced68690-498a-4567-9d14-5c01f974d8b1
+ # which suggests it can take the value "All" to "suppress all auto-replies",
+ # or a comma separated list of auto-reply classes to suppress.
+ # The following Stack Overflow answers have a little more context:
+ # https://stackoverflow.com/a/25324691/5252017
+ # https://stackoverflow.com/a/61646381/5252017
+ multipart_msg["X-Auto-Response-Suppress"] = "All"
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
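
The new headers above discourage out-of-office and other automatic replies to notification emails. A standalone sketch of building a message carrying the same two headers using only the standard library; addresses and subject are illustrative:

```python
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart("alternative")
msg["Subject"] = "Notification"
msg["From"] = "noreply@example.com"
msg["To"] = "user@example.com"
msg["Date"] = email.utils.formatdate()
msg["Message-ID"] = email.utils.make_msgid()
# RFC 3834: any value other than "no" tells well-behaved agents not to auto-reply.
msg["Auto-Submitted"] = "auto-generated"
# Exchange-specific equivalent; "All" suppresses every class of auto-reply.
msg["X-Auto-Response-Suppress"] = "All"
msg.attach(MIMEText("You have unread messages.", "plain"))

print(msg["Auto-Submitted"], msg["X-Auto-Response-Suppress"])
```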
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index 73861bbd40..bd9d0bb34b 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -15,6 +15,7 @@ import logging
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, StoreError, SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.types import Requester
if TYPE_CHECKING:
@@ -29,7 +30,10 @@ class SetPasswordHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._auth_handler = hs.get_auth_handler()
- self._device_handler = hs.get_device_handler()
+ # This can only be instantiated on the main process.
+ device_handler = hs.get_device_handler()
+ assert isinstance(device_handler, DeviceHandler)
+ self._device_handler = device_handler
async def set_password(
self,
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 1e171f3f71..44e70fc4b8 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
+import hashlib
+import io
import logging
from typing import (
TYPE_CHECKING,
@@ -37,6 +39,7 @@ from twisted.web.server import Request
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
from synapse.config.sso import SsoAttributeRequirement
+from synapse.handlers.device import DeviceHandler
from synapse.handlers.register import init_counters_for_auth_provider
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http import get_request_user_agent
@@ -128,12 +131,16 @@ class SsoIdentityProvider(Protocol):
@attr.s(auto_attribs=True)
class UserAttributes:
+ # NB: This struct is documented in docs/sso_mapping_providers.md so that users can
+ # populate it with data from their own mapping providers.
+
# the localpart of the mxid that the mapper has assigned to the user.
# if `None`, the mapper has not picked a userid, and the user should be prompted to
# enter one.
localpart: Optional[str]
confirm_localpart: bool = False
display_name: Optional[str] = None
+ picture: Optional[str] = None
emails: Collection[str] = attr.Factory(list)
@@ -144,6 +151,9 @@ class UsernameMappingSession:
# A unique identifier for this SSO provider, e.g. "oidc" or "saml".
auth_provider_id: str
+ # An optional session ID from the IdP.
+ auth_provider_session_id: Optional[str]
+
# user ID on the IdP server
remote_user_id: str
@@ -185,9 +195,14 @@ class SsoHandler:
self._server_name = hs.hostname
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
+ self._device_handler = hs.get_device_handler()
self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
+ self._media_repo = (
+ hs.get_media_repository() if hs.config.media.can_load_media_repo else None
+ )
+ self._http_client = hs.get_proxied_blacklisted_http_client()
# The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window.
@@ -461,6 +476,7 @@ class SsoHandler:
client_redirect_url,
next_step_url,
extra_login_attributes,
+ auth_provider_session_id,
)
user_id = await self._register_mapped_user(
@@ -486,6 +502,8 @@ class SsoHandler:
await self._profile_handler.set_displayname(
user_id_obj, requester, attributes.display_name, True
)
+ if attributes.picture:
+ await self.set_avatar(user_id, attributes.picture)
await self._auth_handler.complete_sso_login(
user_id,
@@ -582,6 +600,7 @@ class SsoHandler:
client_redirect_url: str,
next_step_url: bytes,
extra_login_attributes: Optional[JsonDict],
+ auth_provider_session_id: Optional[str],
) -> NoReturn:
"""Creates a UsernameMappingSession and redirects the browser
@@ -604,6 +623,8 @@ class SsoHandler:
extra_login_attributes: An optional dictionary of extra
attributes to be provided to the client in the login response.
+ auth_provider_session_id: An optional session ID from the IdP.
+
Raises:
RedirectException
"""
@@ -612,6 +633,7 @@ class SsoHandler:
now = self._clock.time_msec()
session = UsernameMappingSession(
auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
remote_user_id=remote_user_id,
display_name=attributes.display_name,
emails=attributes.emails,
@@ -690,8 +712,110 @@ class SsoHandler:
await self._store.record_user_external_id(
auth_provider_id, remote_user_id, registered_user_id
)
+
+ # Set avatar, if available
+ if attributes.picture:
+ await self.set_avatar(registered_user_id, attributes.picture)
+
return registered_user_id
+ async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
+ """Set avatar of the user.
+
+ This downloads the image file from the URL provided, stores that in
+ the media repository and then sets the avatar on the user's profile.
+
+ It detects whether the same image is being saved again and bails out early by
+ storing the hash of the file in the `upload_name` of the avatar image.
+
+ Currently, it only supports server configurations which run the media repository
+ within the same process.
+
+ It fails silently and logs a warning (by raising an exception and catching it
+ internally) if:
+ * it is unable to fetch the image itself (non 200 status code) or
+ * the image supplied is bigger than max allowed size or
+ * the image type is not one of the allowed image types.
+
+ Args:
+ user_id: matrix user ID in the form @localpart:domain as a string.
+
+ picture_https_url: HTTPS url for the picture image file.
+
+ Returns: `True` if the user's avatar has been successfully set to the image at
+ `picture_https_url`.
+ """
+ if self._media_repo is None:
+ logger.info(
+ "failed to set user avatar because out-of-process media repositories "
+ "are not supported yet "
+ )
+ return False
+
+ try:
+ uid = UserID.from_string(user_id)
+
+ def is_allowed_mime_type(content_type: str) -> bool:
+ if (
+ self._profile_handler.allowed_avatar_mimetypes
+ and content_type
+ not in self._profile_handler.allowed_avatar_mimetypes
+ ):
+ return False
+ return True
+
+ # download picture, enforcing size limit & mime type check
+ picture = io.BytesIO()
+
+ content_length, headers, uri, code = await self._http_client.get_file(
+ url=picture_https_url,
+ output_stream=picture,
+ max_size=self._profile_handler.max_avatar_size,
+ is_allowed_content_type=is_allowed_mime_type,
+ )
+
+ if code != 200:
+ raise Exception(
+ "GET request to download sso avatar image returned {}".format(code)
+ )
+
+ # upload name includes hash of the image file's content so that we can
+ # easily check if it requires an update or not, the next time user logs in
+ upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest()
+
+ # bail if user already has the same avatar
+ profile = await self._profile_handler.get_profile(user_id)
+ if profile["avatar_url"] is not None:
+ server_name = profile["avatar_url"].split("/")[-2]
+ media_id = profile["avatar_url"].split("/")[-1]
+ if server_name == self._server_name:
+ media = await self._media_repo.store.get_local_media(media_id)
+ if media is not None and upload_name == media["upload_name"]:
+ logger.info("skipping saving the user avatar")
+ return True
+
+ # store it in media repository
+ avatar_mxc_url = await self._media_repo.create_content(
+ media_type=headers[b"Content-Type"][0].decode("utf-8"),
+ upload_name=upload_name,
+ content=picture,
+ content_length=content_length,
+ auth_user=uid,
+ )
+
+ # save it as user avatar
+ await self._profile_handler.set_avatar_url(
+ uid,
+ create_requester(uid),
+ str(avatar_mxc_url),
+ )
+
+ logger.info("successfully saved the user avatar")
+ return True
+ except Exception:
+ logger.warning("failed to save the user avatar")
+ return False
+
async def complete_sso_ui_auth_request(
self,
auth_provider_id: str,
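
`set_avatar` above avoids re-uploading an unchanged IdP picture by embedding a SHA-256 hash of the downloaded bytes in the media `upload_name` and comparing it against the stored name on the next login. A self-contained illustration of that check; the bytes and names here are made up:

```python
import hashlib

def upload_name_for(image_bytes: bytes) -> str:
    # Embed the content hash in the name so later logins can detect "same image".
    return "sso_avatar_" + hashlib.sha256(image_bytes).hexdigest()

previous_upload_name = upload_name_for(b"\x89PNG old avatar bytes")
new_image = b"\x89PNG old avatar bytes"  # the IdP served the same picture again

if upload_name_for(new_image) == previous_upload_name:
    print("skipping saving the user avatar")
else:
    print("storing the new avatar and updating the profile")
```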
@@ -863,7 +987,7 @@ class SsoHandler:
)
async def handle_terms_accepted(
- self, request: Request, session_id: str, terms_version: str
+ self, request: SynapseRequest, session_id: str, terms_version: str
) -> None:
"""Handle a request to the new-user 'consent' endpoint
@@ -965,6 +1089,7 @@ class SsoHandler:
session.client_redirect_url,
session.extra_login_attributes,
new_user=True,
+ auth_provider_session_id=session.auth_provider_session_id,
)
def _expire_old_sessions(self) -> None:
@@ -1014,6 +1139,84 @@ class SsoHandler:
return True
+ async def revoke_sessions_for_provider_session_id(
+ self,
+ auth_provider_id: str,
+ auth_provider_session_id: str,
+ expected_user_id: Optional[str] = None,
+ ) -> None:
+ """Revoke any devices and in-flight logins tied to a provider session.
+
+ Can only be called from the main process.
+
+ Args:
+ auth_provider_id: A unique identifier for this SSO provider, e.g.
+ "oidc" or "saml".
+ auth_provider_session_id: The session ID from the provider to log out
+ expected_user_id: The user we're expecting to log out. If set, it will ignore
+ sessions belonging to other users and log an error.
+ """
+
+ # It is expected that this is the main process.
+ assert isinstance(
+ self._device_handler, DeviceHandler
+ ), "revoking SSO sessions can only be called on the main process"
+
+ # Invalidate any running user-mapping sessions
+ to_delete = []
+ for session_id, session in self._username_mapping_sessions.items():
+ if (
+ session.auth_provider_id == auth_provider_id
+ and session.auth_provider_session_id == auth_provider_session_id
+ ):
+ to_delete.append(session_id)
+
+ for session_id in to_delete:
+ logger.info("Revoking mapping session %s", session_id)
+ del self._username_mapping_sessions[session_id]
+
+ # Invalidate any in-flight login tokens
+ await self._store.invalidate_login_tokens_by_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # Fetch any device(s) in the store associated with the session ID.
+ devices = await self._store.get_devices_by_auth_provider_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # We have no guarantee that all the devices of that session are for the same
+ # `user_id`. Hence, we have to iterate over the list of devices and log them out
+ # one by one.
+ for device in devices:
+ user_id = device["user_id"]
+ device_id = device["device_id"]
+
+ # If the user_id associated with that device/session is not the one we got
+ # out of the `sub` claim, skip that device and log an error.
+ if expected_user_id is not None and user_id != expected_user_id:
+ logger.error(
+ "Received a logout notification from SSO provider "
+ f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
+ f"a session ID ({auth_provider_session_id!r}) which belongs to "
+ f"{user_id!r}. This may happen when the SSO provider user mapper "
+ "uses something else than the standard attribute as mapping ID. "
+ "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
+ "in the provider config if that is the case."
+ )
+ continue
+
+ logger.info(
+ "Logging out %r (device %r) via SSO (%r) logout notification (session %r).",
+ user_id,
+ device_id,
+ auth_provider_id,
+ auth_provider_session_id,
+ )
+ await self._device_handler.delete_devices(user_id, [device_id])
+
def get_username_mapping_session_cookie_from_request(request: IRequest) -> str:
"""Extract the session ID from the cookie
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index d42a414c90..259456b55d 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -13,7 +13,20 @@
# limitations under the License.
import itertools
import logging
-from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple
+from typing import (
+ TYPE_CHECKING,
+ AbstractSet,
+ Any,
+ Collection,
+ Dict,
+ FrozenSet,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+)
import attr
from prometheus_client import Counter
@@ -27,7 +40,8 @@ from synapse.handlers.relations import BundledAggregations
from synapse.logging.context import current_context
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
from synapse.push.clientformat import format_push_rules_for_user
-from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
from synapse.types import (
@@ -89,7 +103,7 @@ class SyncConfig:
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TimelineBatch:
prev_batch: StreamToken
- events: List[EventBase]
+ events: Sequence[EventBase]
limited: bool
# A mapping of event ID to the bundled aggregations for the above events.
# This is only calculated if limited is true.
@@ -115,6 +129,7 @@ class JoinedSyncResult:
ephemeral: List[JsonDict]
account_data: List[JsonDict]
unread_notifications: JsonDict
+ unread_thread_notifications: JsonDict
summary: Optional[JsonDict]
unread_count: int
@@ -507,10 +522,17 @@ class SyncHandler:
# ensure that we always include current state in the timeline
current_state_ids: FrozenSet[str] = frozenset()
if any(e.is_state() for e in recents):
+ # FIXME(faster_joins): We use the partial state here as
+ # we don't want to block `/sync` on finishing a lazy join.
+ # Which should be fine once
+ # https://github.com/matrix-org/synapse/issues/12989 is resolved,
+ # since we shouldn't reach here anymore?
+ # Note that we use the current state as a whitelist for filtering
+ # `recents`, so partial state is only a problem when a membership
+ # event turns up in `recents` but has not made it into the current
+ # state.
current_state_ids_map = (
- await self._state_storage_controller.get_current_state_ids(
- room_id
- )
+ await self.store.get_partial_current_state_ids(room_id)
)
current_state_ids = frozenset(current_state_ids_map.values())
@@ -579,7 +601,13 @@ class SyncHandler:
if any(e.is_state() for e in loaded_recents):
# FIXME(faster_joins): We use the partial state here as
# we don't want to block `/sync` on finishing a lazy join.
- # Is this the correct way of doing it?
+ # Which should be fine once
+ # https://github.com/matrix-org/synapse/issues/12989 is resolved,
+ # since we shouldn't reach here anymore?
+ # Note that we use the current state as a whitelist for filtering
+ # `loaded_recents`, so partial state is only a problem when a
+ # membership event turns up in `loaded_recents` but has not made it
+ # into the current state.
current_state_ids_map = (
await self.store.get_partial_current_state_ids(room_id)
)
@@ -627,7 +655,10 @@ class SyncHandler:
)
async def get_state_after_event(
- self, event_id: str, state_filter: Optional[StateFilter] = None
+ self,
+ event_id: str,
+ state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
) -> StateMap[str]:
"""
Get the room state after the given event
@@ -635,9 +666,14 @@ class SyncHandler:
Args:
event_id: event of interest
state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the event and `state_filter` is not satisfied by partial state.
+ Defaults to `True`.
"""
state_ids = await self._state_storage_controller.get_state_ids_for_event(
- event_id, state_filter=state_filter or StateFilter.all()
+ event_id,
+ state_filter=state_filter or StateFilter.all(),
+ await_full_state=await_full_state,
)
# using get_metadata_for_events here (instead of get_event) sidesteps an issue
@@ -660,6 +696,7 @@ class SyncHandler:
room_id: str,
stream_position: StreamToken,
state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
) -> StateMap[str]:
"""Get the room state at a particular stream position
@@ -667,6 +704,9 @@ class SyncHandler:
room_id: room for which to get state
stream_position: point at which to get state
state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the last event in the room before `stream_position` and
+ `state_filter` is not satisfied by partial state. Defaults to `True`.
"""
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
@@ -678,7 +718,9 @@ class SyncHandler:
if last_event_id:
state = await self.get_state_after_event(
- last_event_id, state_filter=state_filter or StateFilter.all()
+ last_event_id,
+ state_filter=state_filter or StateFilter.all(),
+ await_full_state=await_full_state,
)
else:
@@ -764,18 +806,6 @@ class SyncHandler:
if canonical_alias and canonical_alias.content.get("alias"):
return summary
- me = sync_config.user.to_string()
-
- joined_user_ids = [
- r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
- ]
- invited_user_ids = [
- r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
- ]
- gone_user_ids = [
- r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
- ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
-
# FIXME: only build up a member_ids list for our heroes
member_ids = {}
for membership in (
@@ -787,11 +817,8 @@ class SyncHandler:
for user_id, event_id in details.get(membership, empty_ms).members:
member_ids[user_id] = event_id
- # FIXME: order by stream ordering rather than as returned by SQL
- if joined_user_ids or invited_user_ids:
- summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
- else:
- summary["m.heroes"] = sorted(gone_user_ids)[0:5]
+ me = sync_config.user.to_string()
+ summary["m.heroes"] = extract_heroes_from_room_summary(details, me)
if not sync_config.filter_collection.lazy_load_members():
return summary
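
The inline hero computation above is replaced by `extract_heroes_from_room_summary`. As a rough, self-contained sketch of the rule the removed code implemented (prefer joined/invited members excluding the syncing user, fall back to users who left or were banned, cap at five); the new helper may order members differently, e.g. by stream ordering:

```python
from typing import Dict, List

def pick_heroes(members_by_state: Dict[str, List[str]], me: str) -> List[str]:
    joined_or_invited = [
        u
        for state in ("join", "invite")
        for u in members_by_state.get(state, [])
        if u != me
    ]
    if joined_or_invited:
        return sorted(joined_or_invited)[:5]
    gone = [
        u
        for state in ("leave", "ban")
        for u in members_by_state.get(state, [])
        if u != me
    ]
    return sorted(gone)[:5]

print(pick_heroes({"join": ["@a:x", "@me:x"], "invite": ["@b:x"]}, "@me:x"))
```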
@@ -852,16 +879,26 @@ class SyncHandler:
now_token: StreamToken,
full_state: bool,
) -> MutableStateMap[EventBase]:
- """Works out the difference in state between the start of the timeline
- and the previous sync.
+ """Works out the difference in state between the end of the previous sync and
+ the start of the timeline.
Args:
room_id:
batch: The timeline batch for the room that will be sent to the user.
sync_config:
- since_token: Token of the end of the previous batch. May be None.
+ since_token: Token of the end of the previous batch. May be `None`.
now_token: Token of the end of the current batch.
full_state: Whether to force returning the full state.
+ `lazy_load_members` still applies when `full_state` is `True`.
+
+ Returns:
+ The state to return in the sync response for the room.
+
+ Clients will overlay this onto the state at the end of the previous sync to
+ arrive at the state at the start of the timeline.
+
+ Clients will then overlay state events in the timeline to arrive at the
+ state at the end of the timeline, in preparation for the next sync.
"""
# TODO(mjark) Check if the state events were received by the server
# after the previous sync, since we need to include those state
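
As a rough, illustrative sketch of the contract spelled out in the docstring above (not the real `_calculate_state`): the room state returned for a sync is what a client must overlay on the state it held at the end of its previous sync to reach the state at the start of the new timeline, skipping anything the timeline itself already contains:

```python
from typing import Dict, Tuple

StateMap = Dict[Tuple[str, str], str]

def state_delta(
    timeline_start: StateMap,
    previous_timeline_end: StateMap,
    timeline_contains: StateMap,
) -> StateMap:
    return {
        key: event_id
        for key, event_id in timeline_start.items()
        if previous_timeline_end.get(key) != event_id
        and timeline_contains.get(key) != event_id
    }

print(state_delta(
    {("m.room.name", ""): "$new"},
    {("m.room.name", ""): "$old"},
    {},
))
```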
@@ -869,8 +906,17 @@ class SyncHandler:
# TODO(mjark) Check for new redactions in the state events.
with Measure(self.clock, "compute_state_delta"):
+ # The memberships needed for events in the timeline.
+ # Only calculated when `lazy_load_members` is on.
+ members_to_fetch: Optional[Set[str]] = None
+
+ # A dictionary mapping user IDs to the first event in the timeline sent by
+ # them. Only calculated when `lazy_load_members` is on.
+ first_event_by_sender_map: Optional[Dict[str, EventBase]] = None
- members_to_fetch = None
+ # The contribution to the room state from state events in the timeline.
+ # Only contains the last event for any given state key.
+ timeline_state: StateMap[str]
lazy_load_members = sync_config.filter_collection.lazy_load_members()
include_redundant_members = (
@@ -881,10 +927,23 @@ class SyncHandler:
# We only request state for the members needed to display the
# timeline:
- members_to_fetch = {
- event.sender # FIXME: we also care about invite targets etc.
- for event in batch.events
- }
+ timeline_state = {}
+
+ members_to_fetch = set()
+ first_event_by_sender_map = {}
+ for event in batch.events:
+ # Build the map from user IDs to the first timeline event they sent.
+ if event.sender not in first_event_by_sender_map:
+ first_event_by_sender_map[event.sender] = event
+
+ # We need the event's sender, unless their membership was in a
+ # previous timeline event.
+ if (EventTypes.Member, event.sender) not in timeline_state:
+ members_to_fetch.add(event.sender)
+ # FIXME: we also care about invite targets etc.
+
+ if event.is_state():
+ timeline_state[(event.type, event.state_key)] = event.event_id
if full_state:
# always make sure we LL ourselves so we know we're in the room
@@ -894,55 +953,80 @@ class SyncHandler:
members_to_fetch.add(sync_config.user.to_string())
state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+
+ # We are happy to use partial state to compute the `/sync` response.
+ # Since partial state may not include the lazy-loaded memberships we
+ # require, we fix up the state response afterwards with memberships from
+ # auth events.
+ await_full_state = False
else:
+ timeline_state = {
+ (event.type, event.state_key): event.event_id
+ for event in batch.events
+ if event.is_state()
+ }
+
state_filter = StateFilter.all()
+ await_full_state = True
- timeline_state = {
- (event.type, event.state_key): event.event_id
- for event in batch.events
- if event.is_state()
- }
+ # Now calculate the state to return in the sync response for the room.
+ # This is more or less the change in state between the end of the previous
+ # sync's timeline and the start of the current sync's timeline.
+ # See the docstring above for details.
+ state_ids: StateMap[str]
if full_state:
if batch:
- current_state_ids = (
+ state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_for_event(
- batch.events[-1].event_id, state_filter=state_filter
+ batch.events[-1].event_id,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
)
- state_ids = (
+ state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
- batch.events[0].event_id, state_filter=state_filter
+ batch.events[0].event_id,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
)
else:
- current_state_ids = await self.get_state_at(
- room_id, stream_position=now_token, state_filter=state_filter
+ state_at_timeline_end = await self.get_state_at(
+ room_id,
+ stream_position=now_token,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
- state_ids = current_state_ids
+ state_at_timeline_start = state_at_timeline_end
state_ids = _calculate_state(
timeline_contains=timeline_state,
- timeline_start=state_ids,
- previous={},
- current=current_state_ids,
+ timeline_start=state_at_timeline_start,
+ timeline_end=state_at_timeline_end,
+ previous_timeline_end={},
lazy_load_members=lazy_load_members,
)
elif batch.limited:
if batch:
state_at_timeline_start = (
await self._state_storage_controller.get_state_ids_for_event(
- batch.events[0].event_id, state_filter=state_filter
+ batch.events[0].event_id,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
)
else:
# We can get here if the user has ignored the senders of all
# the recent events.
state_at_timeline_start = await self.get_state_at(
- room_id, stream_position=now_token, state_filter=state_filter
+ room_id,
+ stream_position=now_token,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
# for now, we disable LL for gappy syncs - see
@@ -964,28 +1048,35 @@ class SyncHandler:
# is indeed the case.
assert since_token is not None
state_at_previous_sync = await self.get_state_at(
- room_id, stream_position=since_token, state_filter=state_filter
+ room_id,
+ stream_position=since_token,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
if batch:
- current_state_ids = (
+ state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_for_event(
- batch.events[-1].event_id, state_filter=state_filter
+ batch.events[-1].event_id,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
)
else:
- # Its not clear how we get here, but empirically we do
- # (#5407). Logging has been added elsewhere to try and
- # figure out where this state comes from.
- current_state_ids = await self.get_state_at(
- room_id, stream_position=now_token, state_filter=state_filter
+ # We can get here if the user has ignored the senders of all
+ # the recent events.
+ state_at_timeline_end = await self.get_state_at(
+ room_id,
+ stream_position=now_token,
+ state_filter=state_filter,
+ await_full_state=await_full_state,
)
state_ids = _calculate_state(
timeline_contains=timeline_state,
timeline_start=state_at_timeline_start,
- previous=state_at_previous_sync,
- current=current_state_ids,
+ timeline_end=state_at_timeline_end,
+ previous_timeline_end=state_at_previous_sync,
# we have to include LL members in case LL initial sync missed them
lazy_load_members=lazy_load_members,
)
@@ -1008,8 +1099,30 @@ class SyncHandler:
(EventTypes.Member, member)
for member in members_to_fetch
),
+ await_full_state=False,
)
+ # If we only have partial state for the room, `state_ids` may be missing the
+ # memberships we wanted. We attempt to find some by digging through the auth
+ # events of timeline events.
+ if lazy_load_members and await self.store.is_partial_state_room(room_id):
+ assert members_to_fetch is not None
+ assert first_event_by_sender_map is not None
+
+ additional_state_ids = (
+ await self._find_missing_partial_state_memberships(
+ room_id, members_to_fetch, first_event_by_sender_map, state_ids
+ )
+ )
+ state_ids = {**state_ids, **additional_state_ids}
+
+ # At this point, if `lazy_load_members` is enabled, `state_ids` includes
+ # the memberships of all event senders in the timeline. This is because we
+ # may not have sent the memberships in a previous sync.
+
+ # When `include_redundant_members` is on, we send all the lazy-loaded
+ # memberships of event senders. Otherwise we make an effort to limit the set
+ # of memberships we send to those that we have not already sent to this client.
if lazy_load_members and not include_redundant_members:
cache_key = (sync_config.user.to_string(), sync_config.device_id)
cache = self.get_lazy_loaded_members_cache(cache_key)
@@ -1051,9 +1164,118 @@ class SyncHandler:
if e.type != EventTypes.Aliases # until MSC2261 or alternative solution
}
+ async def _find_missing_partial_state_memberships(
+ self,
+ room_id: str,
+ members_to_fetch: Collection[str],
+ events_with_membership_auth: Mapping[str, EventBase],
+ found_state_ids: StateMap[str],
+ ) -> StateMap[str]:
+ """Finds missing memberships from a set of auth events and returns them as a
+ state map.
+
+ Args:
+ room_id: The partial state room to find the remaining memberships for.
+ members_to_fetch: The memberships to find.
+ events_with_membership_auth: A mapping from user IDs to events whose auth
+ events would contain their prior membership, if one exists.
+ Note that join events will not cite a prior membership if a user has
+ never been in a room before.
+ found_state_ids: A dict from (type, state_key) -> state_event_id, containing
+ memberships that have been previously found. Entries in
+ `members_to_fetch` that have a membership in `found_state_ids` are
+ ignored.
+
+ Returns:
+ A dict from ("m.room.member", state_key) -> state_event_id, containing the
+ memberships missing from `found_state_ids`.
+
+ When `events_with_membership_auth` contains a join event for a given user
+ which does not cite a prior membership, no membership is returned for that
+ user.
+
+ Raises:
+ KeyError: if `events_with_membership_auth` does not have an entry for a
+ missing membership. Memberships in `found_state_ids` do not need an
+ entry in `events_with_membership_auth`.
+ """
+ additional_state_ids: MutableStateMap[str] = {}
+
+ # Tracks the missing members for logging purposes.
+ missing_members = set()
+
+ # Identify memberships missing from `found_state_ids` and pick out the auth
+ # events in which to look for them.
+ auth_event_ids: Set[str] = set()
+ for member in members_to_fetch:
+ if (EventTypes.Member, member) in found_state_ids:
+ continue
+
+ event_with_membership_auth = events_with_membership_auth[member]
+ is_join = (
+ event_with_membership_auth.is_state()
+ and event_with_membership_auth.type == EventTypes.Member
+ and event_with_membership_auth.state_key == member
+ and event_with_membership_auth.content.get("membership")
+ == Membership.JOIN
+ )
+ if not is_join:
+ # The event must include the desired membership as an auth event, unless
+ # it's the first join event for a given user.
+ missing_members.add(member)
+ auth_event_ids.update(event_with_membership_auth.auth_event_ids())
+
+ auth_events = await self.store.get_events(auth_event_ids)
+
+ # Run through the missing memberships once more, picking out the memberships
+ # from the pile of auth events we have just fetched.
+ for member in members_to_fetch:
+ if (EventTypes.Member, member) in found_state_ids:
+ continue
+
+ event_with_membership_auth = events_with_membership_auth[member]
+
+ # Dig through the auth events to find the desired membership.
+ for auth_event_id in event_with_membership_auth.auth_event_ids():
+ # We only store events once we have all their auth events,
+ # so the auth event must be in the pile we have just
+ # fetched.
+ auth_event = auth_events[auth_event_id]
+
+ if (
+ auth_event.type == EventTypes.Member
+ and auth_event.state_key == member
+ ):
+ missing_members.discard(member)
+ additional_state_ids[
+ (EventTypes.Member, member)
+ ] = auth_event.event_id
+ break
+
+ if missing_members:
+ # There really shouldn't be any missing memberships now. Either:
+ # * we couldn't find an auth event, which shouldn't happen because we do
+ # not persist events without persisting their auth events first, or
+ # * the set of auth events did not contain a membership we wanted, which
+ # means our caller didn't compute the events in `members_to_fetch`
+ # correctly, or we somehow accepted an event whose auth events were
+ # dodgy.
+ logger.error(
+ "Failed to find memberships for %s in partial state room "
+ "%s in the auth events of %s.",
+ missing_members,
+ room_id,
+ [
+ events_with_membership_auth[member].event_id
+ for member in missing_members
+ ],
+ )
+
+ return additional_state_ids
+
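
The helper above digs missing memberships out of the auth events of timeline events when the room only has partial state. A stripped-down, self-contained sketch of the inner lookup, with event shapes simplified to dicts:

```python
from typing import Dict, List, Optional

def find_membership(
    member: str,
    auth_event_ids: List[str],
    auth_events: Dict[str, Dict[str, str]],  # event_id -> {"type": ..., "state_key": ...}
) -> Optional[str]:
    # Return the ID of the m.room.member auth event for `member`, if any.
    for auth_event_id in auth_event_ids:
        ev = auth_events[auth_event_id]
        if ev["type"] == "m.room.member" and ev["state_key"] == member:
            return auth_event_id
    return None

auth_events = {"$m1": {"type": "m.room.member", "state_key": "@alice:example.org"}}
print(find_membership("@alice:example.org", ["$m1"], auth_events))  # "$m1"
```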
async def unread_notifs_for_room_id(
self, room_id: str, sync_config: SyncConfig
- ) -> NotifCounts:
+ ) -> RoomNotifCounts:
with Measure(self.clock, "unread_notifs_for_room_id"):
return await self.store.get_unread_event_push_actions_by_room_for_user(
@@ -1079,6 +1301,19 @@ class SyncHandler:
At the end, we transfer data from the `sync_result_builder` to a new `SyncResult`
instance to signify that the sync calculation is complete.
"""
+
+ user_id = sync_config.user.to_string()
+ app_service = self.store.get_app_service_by_user_id(user_id)
+ if app_service:
+ # We no longer support AS users using /sync directly.
+ # See https://github.com/matrix-org/matrix-doc/issues/1144
+ raise NotImplementedError()
+
+ # Note: we get the user's room list *before* we get the current token; this
+ # avoids checking back in history if rooms are joined after the token is fetched.
+ token_before_rooms = self.event_sources.get_current_token()
+ mutable_joined_room_ids = set(await self.store.get_rooms_for_user(user_id))
+
# NB: The now_token gets changed by some of the generate_sync_* methods,
# this is due to some of the underlying streams not supporting the ability
# to query up to a given point.
@@ -1086,6 +1321,57 @@ class SyncHandler:
now_token = self.event_sources.get_current_token()
log_kv({"now_token": now_token})
+ # Since we fetched the user's room list before the token, there's a small window
+ # during which membership events may have been persisted, so we fetch these now
+ # and modify the joined room list for any changes between the get_rooms_for_user
+ # call and the get_current_token call.
+ membership_change_events = []
+ if since_token:
+ membership_change_events = await self.store.get_membership_changes_for_user(
+ user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude
+ )
+
+ mem_last_change_by_room_id: Dict[str, EventBase] = {}
+ for event in membership_change_events:
+ mem_last_change_by_room_id[event.room_id] = event
+
+ # For the latest membership event in each room found, add/remove the room ID
+ # from the joined room list accordingly. In this case we only care if the
+ # latest change is JOIN.
+
+ for room_id, event in mem_last_change_by_room_id.items():
+ assert event.internal_metadata.stream_ordering
+ if (
+ event.internal_metadata.stream_ordering
+ < token_before_rooms.room_key.stream
+ ):
+ continue
+
+ logger.info(
+ "User membership change between getting rooms and current token: %s %s %s",
+ user_id,
+ event.membership,
+ room_id,
+ )
+ # User joined a room - we then have to check the room state to ensure we
+ # respect any bans if there's a race between the join and ban events.
+ if event.membership == Membership.JOIN:
+ user_ids_in_room = await self.store.get_users_in_room(room_id)
+ if user_id in user_ids_in_room:
+ mutable_joined_room_ids.add(room_id)
+ # The user left the room, or left and was re-invited but has not rejoined yet
+ else:
+ mutable_joined_room_ids.discard(room_id)
+
+ # Now that we have our list of joined room IDs, exclude any rooms configured to be excluded and freeze the set
+ joined_room_ids = frozenset(
+ (
+ room_id
+ for room_id in mutable_joined_room_ids
+ if room_id not in self.rooms_to_exclude
+ )
+ )
+
logger.debug(
"Calculating sync response for %r between %s and %s",
sync_config.user,
@@ -1093,22 +1379,13 @@ class SyncHandler:
now_token,
)
- user_id = sync_config.user.to_string()
- app_service = self.store.get_app_service_by_user_id(user_id)
- if app_service:
- # We no longer support AS users using /sync directly.
- # See https://github.com/matrix-org/matrix-doc/issues/1144
- raise NotImplementedError()
- else:
- joined_room_ids = await self.get_rooms_for_user_at(
- user_id, now_token.room_key
- )
sync_result_builder = SyncResultBuilder(
sync_config,
full_state,
since_token=since_token,
now_token=now_token,
joined_room_ids=joined_room_ids,
+ membership_change_events=membership_change_events,
)
logger.debug("Fetching account data")
@@ -1195,10 +1472,10 @@ class SyncHandler:
async def _generate_sync_entry_for_device_list(
self,
sync_result_builder: "SyncResultBuilder",
- newly_joined_rooms: Set[str],
- newly_joined_or_invited_or_knocked_users: Set[str],
- newly_left_rooms: Set[str],
- newly_left_users: Set[str],
+ newly_joined_rooms: AbstractSet[str],
+ newly_joined_or_invited_or_knocked_users: AbstractSet[str],
+ newly_left_rooms: AbstractSet[str],
+ newly_left_users: AbstractSet[str],
) -> DeviceListUpdates:
"""Generate the DeviceListUpdates section of sync
@@ -1216,8 +1493,7 @@ class SyncHandler:
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
- # We're going to mutate these fields, so lets copy them rather than
- # assume they won't get used later.
+ # Take a copy since these fields will be mutated later.
newly_joined_or_invited_or_knocked_users = set(
newly_joined_or_invited_or_knocked_users
)
@@ -1256,16 +1532,14 @@ class SyncHandler:
since_token.device_list_key
)
if changed_users is not None:
- result = await self.store.get_rooms_for_users_with_stream_ordering(
- changed_users
- )
+ result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items():
# Check if the changed user shares any rooms with the user,
# or if the changed user is the syncing user (as we always
# want to include device list updates of their own devices).
if user_id == changed_user_id or any(
- e.room_id in joined_rooms for e in entries
+ rid in joined_rooms for rid in entries
):
users_that_have_changed.add(changed_user_id)
else:
@@ -1299,13 +1573,9 @@ class SyncHandler:
newly_left_users.update(left_users)
# Remove any users that we still share a room with.
- left_users_rooms = (
- await self.store.get_rooms_for_users_with_stream_ordering(
- newly_left_users
- )
- )
+ left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
- if any(e.room_id in joined_rooms for e in entries):
+ if any(rid in joined_rooms for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(
@@ -1417,8 +1687,8 @@ class SyncHandler:
async def _generate_sync_entry_for_presence(
self,
sync_result_builder: "SyncResultBuilder",
- newly_joined_rooms: Set[str],
- newly_joined_or_invited_users: Set[str],
+ newly_joined_rooms: AbstractSet[str],
+ newly_joined_or_invited_users: AbstractSet[str],
) -> None:
"""Generates the presence portion of the sync response. Populates the
`sync_result_builder` with the result.
@@ -1476,7 +1746,7 @@ class SyncHandler:
self,
sync_result_builder: "SyncResultBuilder",
account_data_by_room: Dict[str, Dict[str, JsonDict]],
- ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
+ ) -> Tuple[AbstractSet[str], AbstractSet[str], AbstractSet[str], AbstractSet[str]]:
"""Generates the rooms portion of the sync response. Populates the
`sync_result_builder` with the result.
@@ -1536,15 +1806,13 @@ class SyncHandler:
ignored_users = await self.store.ignored_users(user_id)
if since_token:
room_changes = await self._get_rooms_changed(
- sync_result_builder, ignored_users, self.rooms_to_exclude
+ sync_result_builder, ignored_users
)
tags_by_room = await self.store.get_updated_tags(
user_id, since_token.account_data_key
)
else:
- room_changes = await self._get_all_rooms(
- sync_result_builder, ignored_users, self.rooms_to_exclude
- )
+ room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
tags_by_room = await self.store.get_tags_for_user(user_id)
log_kv({"rooms_changed": len(room_changes.room_entries)})
@@ -1598,19 +1866,12 @@ class SyncHandler:
Does not modify the `sync_result_builder`.
"""
- user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
- now_token = sync_result_builder.now_token
+ membership_change_events = sync_result_builder.membership_change_events
assert since_token
- # Get a list of membership change events that have happened to the user
- # requesting the sync.
- membership_changes = await self.store.get_membership_changes_for_user(
- user_id, since_token.room_key, now_token.room_key
- )
-
- if membership_changes:
+ if membership_change_events:
return True
stream_id = since_token.room_key.stream
@@ -1623,13 +1884,14 @@ class SyncHandler:
self,
sync_result_builder: "SyncResultBuilder",
ignored_users: FrozenSet[str],
- excluded_rooms: List[str],
) -> _RoomChanges:
"""Determine the changes in rooms to report to the user.
This function is a first pass at generating the rooms part of the sync response.
It determines which rooms have changed during the sync period, and categorises
- them into four buckets: "knock", "invite", "join" and "leave".
+ them into four buckets: "knock", "invite", "join" and "leave". It also excludes
+ from that list any room that the server configuration marks as excluded from
+ sync results.
1. Finds all membership changes for the user in the sync period (from
`since_token` up to `now_token`).
@@ -1648,16 +1910,10 @@ class SyncHandler:
since_token = sync_result_builder.since_token
now_token = sync_result_builder.now_token
sync_config = sync_result_builder.sync_config
+ membership_change_events = sync_result_builder.membership_change_events
assert since_token
- # TODO: we've already called this function and ran this query in
- # _have_rooms_changed. We could keep the results in memory to avoid a
- # second query, at the cost of more complicated source code.
- membership_change_events = await self.store.get_membership_changes_for_user(
- user_id, since_token.room_key, now_token.room_key, excluded_rooms
- )
-
mem_change_events_by_room_id: Dict[str, List[EventBase]] = {}
for event in membership_change_events:
mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
@@ -1696,7 +1952,11 @@ class SyncHandler:
continue
if room_id in sync_result_builder.joined_room_ids or has_join:
- old_state_ids = await self.get_state_at(room_id, since_token)
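+ # We only need the user's own membership at `since_token`, so restrict the
+ # state query to that single key rather than fetching the full room state.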
+ old_state_ids = await self.get_state_at(
+ room_id,
+ since_token,
+ state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
+ )
old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
old_mem_ev = None
if old_mem_ev_id:
@@ -1722,7 +1982,13 @@ class SyncHandler:
newly_left_rooms.append(room_id)
else:
if not old_state_ids:
- old_state_ids = await self.get_state_at(room_id, since_token)
+ old_state_ids = await self.get_state_at(
+ room_id,
+ since_token,
+ state_filter=StateFilter.from_types(
+ [(EventTypes.Member, user_id)]
+ ),
+ )
old_mem_ev_id = old_state_ids.get(
(EventTypes.Member, user_id), None
)
@@ -1862,7 +2128,6 @@ class SyncHandler:
self,
sync_result_builder: "SyncResultBuilder",
ignored_users: FrozenSet[str],
- ignored_rooms: List[str],
) -> _RoomChanges:
"""Returns entries for all rooms for the user.
@@ -1884,7 +2149,7 @@ class SyncHandler:
room_list = await self.store.get_rooms_for_local_user_where_membership_is(
user_id=user_id,
membership_list=Membership.LIST,
- excluded_rooms=ignored_rooms,
+ excluded_rooms=self.rooms_to_exclude,
)
room_entries = []
@@ -2117,6 +2382,7 @@ class SyncHandler:
ephemeral=ephemeral,
account_data=account_data_events,
unread_notifications=unread_notifications,
+ unread_thread_notifications={},
summary=summary,
unread_count=0,
)
@@ -2124,10 +2390,33 @@ class SyncHandler:
if room_sync or always_include:
notifs = await self.unread_notifs_for_room_id(room_id, sync_config)
- unread_notifications["notification_count"] = notifs.notify_count
- unread_notifications["highlight_count"] = notifs.highlight_count
-
- room_sync.unread_count = notifs.unread_count
+ # Notifications for the main timeline.
+ notify_count = notifs.main_timeline.notify_count
+ highlight_count = notifs.main_timeline.highlight_count
+ unread_count = notifs.main_timeline.unread_count
+
+ # Check whether the client asked for per-thread notification counts via the sync filter.
+ if sync_config.filter_collection.unread_thread_notifications():
+ # And add info for each thread.
+ room_sync.unread_thread_notifications = {
+ thread_id: {
+ "notification_count": thread_notifs.notify_count,
+ "highlight_count": thread_notifs.highlight_count,
+ }
+ for thread_id, thread_notifs in notifs.threads.items()
+ if thread_id is not None
+ }
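+ # i.e. a mapping from thread root event ID to that thread's own
+ # notification and highlight counts; the main-timeline counts are
+ # reported separately below.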
+
+ else:
+ # Combine the unread counts for all threads and main timeline.
+ for thread_notifs in notifs.threads.values():
+ notify_count += thread_notifs.notify_count
+ highlight_count += thread_notifs.highlight_count
+ unread_count += thread_notifs.unread_count
+
+ unread_notifications["notification_count"] = notify_count
+ unread_notifications["highlight_count"] = highlight_count
+ room_sync.unread_count = unread_count
sync_result_builder.joined.append(room_sync)
@@ -2149,53 +2438,6 @@ class SyncHandler:
else:
raise Exception("Unrecognized rtype: %r", room_builder.rtype)
- async def get_rooms_for_user_at(
- self, user_id: str, room_key: RoomStreamToken
- ) -> FrozenSet[str]:
- """Get set of joined rooms for a user at the given stream ordering.
-
- The stream ordering *must* be recent, otherwise this may throw an
- exception if older than a month. (This function is called with the
- current token, which should be perfectly fine).
-
- Args:
- user_id
- stream_ordering
-
- ReturnValue:
- Set of room_ids the user is in at given stream_ordering.
- """
- joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id)
-
- joined_room_ids = set()
-
- # We need to check that the stream ordering of the join for each room
- # is before the stream_ordering asked for. This might not be the case
- # if the user joins a room between us getting the current token and
- # calling `get_rooms_for_user_with_stream_ordering`.
- # If the membership's stream ordering is after the given stream
- # ordering, we need to go and work out if the user was in the room
- # before.
- for joined_room in joined_rooms:
- if not joined_room.event_pos.persisted_after(room_key):
- joined_room_ids.add(joined_room.room_id)
- continue
-
- logger.info("User joined room after current token: %s", joined_room.room_id)
-
- extrems = (
- await self.store.get_forward_extremities_for_room_at_stream_ordering(
- joined_room.room_id, joined_room.event_pos.stream
- )
- )
- users_in_room = await self.state.get_current_users_in_room(
- joined_room.room_id, extrems
- )
- if user_id in users_in_room:
- joined_room_ids.add(joined_room.room_id)
-
- return frozenset(joined_room_ids)
-
def _action_has_highlight(actions: List[JsonDict]) -> bool:
for action in actions:
@@ -2211,8 +2453,8 @@ def _action_has_highlight(actions: List[JsonDict]) -> bool:
def _calculate_state(
timeline_contains: StateMap[str],
timeline_start: StateMap[str],
- previous: StateMap[str],
- current: StateMap[str],
+ timeline_end: StateMap[str],
+ previous_timeline_end: StateMap[str],
lazy_load_members: bool,
) -> StateMap[str]:
"""Works out what state to include in a sync response.
@@ -2220,45 +2462,50 @@ def _calculate_state(
Args:
timeline_contains: state in the timeline
timeline_start: state at the start of the timeline
- previous: state at the end of the previous sync (or empty dict
+ timeline_end: state at the end of the timeline
+ previous_timeline_end: state at the end of the previous sync (or empty dict
if this is an initial sync)
- current: state at the end of the timeline
lazy_load_members: whether to return members from timeline_start
or not. assumes that timeline_start has already been filtered to
include only the members the client needs to know about.
"""
- event_id_to_key = {
- e: key
- for key, e in itertools.chain(
+ event_id_to_state_key = {
+ event_id: state_key
+ for state_key, event_id in itertools.chain(
timeline_contains.items(),
- previous.items(),
timeline_start.items(),
- current.items(),
+ timeline_end.items(),
+ previous_timeline_end.items(),
)
}
- c_ids = set(current.values())
- ts_ids = set(timeline_start.values())
- p_ids = set(previous.values())
- tc_ids = set(timeline_contains.values())
+ timeline_end_ids = set(timeline_end.values())
+ timeline_start_ids = set(timeline_start.values())
+ previous_timeline_end_ids = set(previous_timeline_end.values())
+ timeline_contains_ids = set(timeline_contains.values())
# If we are lazyloading room members, we explicitly add the membership events
# for the senders in the timeline into the state block returned by /sync,
# as we may not have sent them to the client before. We find these membership
# events by filtering them out of timeline_start, which has already been filtered
# to only include membership events for the senders in the timeline.
- # In practice, we can do this by removing them from the p_ids list,
- # which is the list of relevant state we know we have already sent to the client.
+ # In practice, we can do this by removing them from the previous_timeline_end_ids
+ # list, which is the list of relevant state we know we have already sent to the
+ # client.
# see https://github.com/matrix-org/synapse/pull/2970/files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809
if lazy_load_members:
- p_ids.difference_update(
+ previous_timeline_end_ids.difference_update(
e for t, e in timeline_start.items() if t[0] == EventTypes.Member
)
- state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids
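+ # The state to return is everything relevant at the start or end of the
+ # timeline, minus state the client already knows from the previous sync,
+ # minus state it will see anyway via events in the timeline itself.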
+ state_ids = (
+ (timeline_end_ids | timeline_start_ids)
+ - previous_timeline_end_ids
+ - timeline_contains_ids
+ )
- return {event_id_to_key[e]: e for e in state_ids}
+ return {event_id_to_state_key[e]: e for e in state_ids}
@attr.s(slots=True, auto_attribs=True)
@@ -2287,6 +2534,7 @@ class SyncResultBuilder:
since_token: Optional[StreamToken]
now_token: StreamToken
joined_room_ids: FrozenSet[str]
+ membership_change_events: List[EventBase]
presence: List[UserPresenceState] = attr.Factory(list)
account_data: List[JsonDict] = attr.Factory(list)
@@ -2296,7 +2544,7 @@ class SyncResultBuilder:
archived: List[ArchivedSyncResult] = attr.Factory(list)
to_device: List[JsonDict] = attr.Factory(list)
- def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:
+ def calculate_user_changes(self) -> Tuple[AbstractSet[str], AbstractSet[str]]:
"""Work out which other users have joined or left rooms we are joined to.
This data is only useful for an incremental sync.
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 27aa0d3126..a0ea719430 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -26,7 +26,7 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.replication.tcp.streams import TypingStream
from synapse.streams import EventSource
-from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id
+from synapse.types import JsonDict, Requester, StreamKeyType, UserID
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
@@ -253,12 +253,11 @@ class TypingWriterHandler(FollowerTypingHandler):
self, target_user: UserID, requester: Requester, room_id: str, timeout: int
) -> None:
target_user_id = target_user.to_string()
- auth_user_id = requester.user.to_string()
if not self.is_mine_id(target_user_id):
raise SynapseError(400, "User is not hosted on this homeserver")
- if target_user_id != auth_user_id:
+ if target_user != requester.user:
raise AuthError(400, "Cannot set another user's typing state")
if requester.shadow_banned:
@@ -266,7 +265,7 @@ class TypingWriterHandler(FollowerTypingHandler):
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
- await self.auth.check_user_in_room(room_id, target_user_id)
+ await self.auth.check_user_in_room(room_id, requester)
logger.debug("%s has started typing in %s", target_user_id, room_id)
@@ -289,12 +288,11 @@ class TypingWriterHandler(FollowerTypingHandler):
self, target_user: UserID, requester: Requester, room_id: str
) -> None:
target_user_id = target_user.to_string()
- auth_user_id = requester.user.to_string()
if not self.is_mine_id(target_user_id):
raise SynapseError(400, "User is not hosted on this homeserver")
- if target_user_id != auth_user_id:
+ if target_user != requester.user:
raise AuthError(400, "Cannot set another user's typing state")
if requester.shadow_banned:
@@ -302,7 +300,7 @@ class TypingWriterHandler(FollowerTypingHandler):
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
- await self.auth.check_user_in_room(room_id, target_user_id)
+ await self.auth.check_user_in_room(room_id, requester)
logger.debug("%s has stopped typing in %s", target_user_id, room_id)
@@ -342,7 +340,7 @@ class TypingWriterHandler(FollowerTypingHandler):
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
- is_in_room = await self.event_auth_handler.check_host_in_room(
+ is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -364,10 +362,14 @@ class TypingWriterHandler(FollowerTypingHandler):
)
return
- users = await self.store.get_users_in_room(room_id)
- domains = {get_domain_from_id(u) for u in users}
+ # Let's check that the origin server is in the room before accepting the typing
+ # event. We don't want to block waiting for partial state to resolve, so take
+ # an approximation if needed.
+ domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
- if self.server_name in domains:
+ if user.domain in domains:
logger.info("Got typing update from %s: %r", user_id, content)
now = self.clock.time_msec()
self._member_typing_until[member] = now + FEDERATION_TIMEOUT
@@ -511,7 +513,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 05cebb5d4d..332edcca24 100644
--- a/synapse/handlers/ui_auth/checkers.py
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -19,7 +19,6 @@ from twisted.web.client import PartialDownloadError
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, LoginError, SynapseError
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.util import json_decoder
if TYPE_CHECKING:
@@ -120,6 +119,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
except PartialDownloadError as pde:
# Twisted is silly
data = pde.response
+ # For mypy's benefit. A general Error.response is Optional[bytes], but
+ # a PartialDownloadError.response should be bytes AFAICS.
+ assert data is not None
resp_body = json_decoder.decode(data.decode("utf-8"))
if "success" in resp_body:
@@ -153,7 +155,7 @@ class _BaseThreepidAuthChecker:
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
- # msisdns are currently always ThreepidBehaviour.REMOTE
+ # msisdns are currently always verified via the IS
if medium == "msisdn":
if not self.hs.config.registration.account_threepid_delegate_msisdn:
raise SynapseError(
@@ -164,18 +166,7 @@ class _BaseThreepidAuthChecker:
threepid_creds,
)
elif medium == "email":
- if (
- self.hs.config.email.threepid_behaviour_email
- == ThreepidBehaviour.REMOTE
- ):
- assert self.hs.config.registration.account_threepid_delegate_email
- threepid = await identity_handler.threepid_from_creds(
- self.hs.config.registration.account_threepid_delegate_email,
- threepid_creds,
- )
- elif (
- self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
- ):
+ if self.hs.config.email.can_verify_email:
threepid = None
row = await self.store.get_threepid_validation_session(
medium,
@@ -227,10 +218,7 @@ class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChec
_BaseThreepidAuthChecker.__init__(self, hs)
def is_enabled(self) -> bool:
- return self.hs.config.email.threepid_behaviour_email in (
- ThreepidBehaviour.REMOTE,
- ThreepidBehaviour.LOCAL,
- )
+ return self.hs.config.email.can_verify_email
async def check_auth(self, authdict: dict, clientip: str) -> Any:
return await self._check_threepid("email", authdict)
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 8c3c52e1ca..3610b6bf78 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -13,7 +13,7 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
@@ -379,7 +379,7 @@ class UserDirectoryHandler(StateDeltasHandler):
user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
- async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
+ async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
@@ -390,32 +390,44 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
if is_public:
- await self.store.add_users_in_public_rooms(room_id, (user_id,))
+ await self.store.add_users_in_public_rooms(room_id, (joining_user_id,))
else:
users_in_room = await self.store.get_users_in_room(room_id)
other_users_in_room = [
other
for other in users_in_room
- if other != user_id
+ if other != joining_user_id
and (
+ # We can't apply any special rules to remote users, so
+ # they're always included.
not self.is_mine_id(other)
+ # Check the special rules to decide whether the local user should be
+ # included in the user directory.
or await self.store.should_include_local_user_in_dir(other)
)
]
- to_insert = set()
+ updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set()
- # First, if they're our user then we need to update for every user
- if self.is_mine_id(user_id):
+ # First, if the joining user is our local user then we need an
+ # update for every other user in the room.
+ if self.is_mine_id(joining_user_id):
for other_user_id in other_users_in_room:
- to_insert.add((user_id, other_user_id))
+ updates_to_users_who_share_rooms.add(
+ (joining_user_id, other_user_id)
+ )
- # Next we need to update for every local user in the room
+ # Next, we need an update for every other local user in the room
+ # that they now share a room with the joining user.
for other_user_id in other_users_in_room:
if self.is_mine_id(other_user_id):
- to_insert.add((other_user_id, user_id))
+ updates_to_users_who_share_rooms.add(
+ (other_user_id, joining_user_id)
+ )
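+ # Illustration (hypothetical user IDs): if local @alice joins a private room
+ # already containing local @bob and remote @carol, we record the pairs
+ # (alice, bob), (alice, carol) and (bob, alice).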
- if to_insert:
- await self.store.add_users_who_share_private_room(room_id, to_insert)
+ if updates_to_users_who_share_rooms:
+ await self.store.add_users_who_share_private_room(
+ room_id, updates_to_users_who_share_rooms
+ )
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
"""Called when when someone leaves a room. The user may be local or remote.
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index 6a9f6635d2..8729630581 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -45,8 +45,7 @@ class AdditionalResource(DirectServeJsonResource):
Args:
hs: homeserver
- handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
- function to be called to handle the request.
+ handler: function to be called to handle the request.
"""
super().__init__()
self._handler = handler
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 084d0a5b84..4eb740c040 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -25,7 +25,6 @@ from typing import (
List,
Mapping,
Optional,
- Sequence,
Tuple,
Union,
)
@@ -90,14 +89,29 @@ incoming_responses_counter = Counter(
"synapse_http_client_responses", "", ["method", "code"]
)
-# the type of the headers list, to be passed to the t.w.h.Headers.
-# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
-# we simplify.
+# the type of the headers map, to be passed to the t.w.h.Headers.
+#
+# The actual type accepted by Twisted is
+# Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+# allowing us to mix and match str and bytes freely. However: any str is also a
+# Sequence[str], so a header value passed as a standalone str would be
+# interpreted as a sequence of 1-codepoint strings. This is a disastrous footgun.
+# We use a narrower value type (RawHeaderValue) to avoid this footgun.
+#
+# We also simplify the keys to be either all str or all bytes. This helps because
+# Dict[K, V] is invariant in K (and indeed V).
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]
# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
-RawHeaderValue = Sequence[Union[str, bytes]]
+RawHeaderValue = Union[
+ List[str],
+ List[bytes],
+ List[Union[str, bytes]],
+ Tuple[str, ...],
+ Tuple[bytes, ...],
+ Tuple[Union[str, bytes], ...],
+]
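+# e.g. under these types `{"User-Agent": "Synapse"}` is rejected by mypy; the bare
+# str would otherwise be sent as a sequence of single-character header values.
+# Pass `{"User-Agent": ["Synapse"]}` instead.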
def check_against_blacklist(
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 2f0177f1e2..0359231e7d 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -155,11 +155,10 @@ class MatrixFederationAgent:
a file for a file upload). Or None if the request is to have
no body.
Returns:
- Deferred[twisted.web.iweb.IResponse]:
- fires when the header of the response has been received (regardless of the
- response status code). Fails if there is any problem which prevents that
- response from being received (including problems that prevent the request
- from being sent).
+ A deferred which fires when the header of the response has been received
+ (regardless of the response status code). Fails if there is any problem
+ which prevents that response from being received (including problems that
+ prevent the request from being sent).
"""
# We use urlparse as that will set `port` to None if there is no
# explicit port.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 3c35b1d2c7..b92f1d3d1a 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -951,8 +951,7 @@ class MatrixFederationHttpClient:
args: query params
Returns:
- dict|list: Succeeds when we get a 2xx HTTP response. The
- result will be the decoded JSON body.
+ Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index b2a50c9105..18899bc6d1 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -34,8 +34,9 @@ from twisted.web.client import (
)
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
+from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
+from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor
@@ -133,7 +134,7 @@ class ProxyAgent(_AgentBase):
uri: bytes,
headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None,
- ) -> defer.Deferred:
+ ) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
@@ -156,17 +157,17 @@ class ProxyAgent(_AgentBase):
a file upload). Or, None if the request is to have no body.
Returns:
- Deferred[IResponse]: completes when the header of the response has
- been received (regardless of the response status code).
+ A deferred which completes when the header of the response has
+ been received (regardless of the response status code).
- Can fail with:
- SchemeNotSupported: if the uri is not http or https
+ Can fail with:
+ SchemeNotSupported: if the uri is not http or https
- twisted.internet.error.TimeoutError if the server we are connecting
- to (proxy or destination) does not accept a connection before
- connectTimeout.
+ twisted.internet.error.TimeoutError if the server we are connecting
+ to (proxy or destination) does not accept a connection before
+ connectTimeout.
- ... other things too.
+ ... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
@@ -220,7 +221,11 @@ class ProxyAgent(_AgentBase):
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
- logger.debug("Requesting %s via %s", uri, endpoint)
+ logger.debug(
+ "Requesting %s via %s",
+ redact_uri(uri.decode("ascii", errors="replace")),
+ endpoint,
+ )
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 19f42159b8..051a1899a0 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -19,6 +19,7 @@ import logging
import types
import urllib
from http import HTTPStatus
+from http.client import FOUND
from inspect import isawaitable
from typing import (
TYPE_CHECKING,
@@ -33,7 +34,6 @@ from typing import (
Optional,
Pattern,
Tuple,
- TypeVar,
Union,
)
@@ -64,6 +64,7 @@ from synapse.logging.context import defer_to_thread, preserve_fn, run_in_backgro
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
+from synapse.util.cancellation import is_function_cancellable
from synapse.util.iterutils import chunk_seq
if TYPE_CHECKING:
@@ -94,68 +95,6 @@ HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
HTTP_STATUS_REQUEST_CANCELLED = 499
-F = TypeVar("F", bound=Callable[..., Any])
-
-
-_cancellable_method_names = frozenset(
- {
- # `RestServlet`, `BaseFederationServlet` and `BaseFederationServerServlet`
- # methods
- "on_GET",
- "on_PUT",
- "on_POST",
- "on_DELETE",
- # `_AsyncResource`, `DirectServeHtmlResource` and `DirectServeJsonResource`
- # methods
- "_async_render_GET",
- "_async_render_PUT",
- "_async_render_POST",
- "_async_render_DELETE",
- "_async_render_OPTIONS",
- # `ReplicationEndpoint` methods
- "_handle_request",
- }
-)
-
-
-def cancellable(method: F) -> F:
- """Marks a servlet method as cancellable.
-
- Methods with this decorator will be cancelled if the client disconnects before we
- finish processing the request.
-
- During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping
- the method. The `cancel()` call will propagate down to the `Deferred` that is
- currently being waited on. That `Deferred` will raise a `CancelledError`, which will
- propagate up, as per normal exception handling.
-
- Before applying this decorator to a new endpoint, you MUST recursively check
- that all `await`s in the function are on `async` functions or `Deferred`s that
- handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from
- premature logging context closure, to stuck requests, to database corruption.
-
- Usage:
- class SomeServlet(RestServlet):
- @cancellable
- async def on_GET(self, request: SynapseRequest) -> ...:
- ...
- """
- if method.__name__ not in _cancellable_method_names and not any(
- method.__name__.startswith(prefix) for prefix in _cancellable_method_names
- ):
- raise ValueError(
- "@cancellable decorator can only be applied to servlet methods."
- )
-
- method.cancellable = True # type: ignore[attr-defined]
- return method
-
-
-def is_method_cancellable(method: Callable[..., Any]) -> bool:
- """Checks whether a servlet method has the `@cancellable` flag."""
- return getattr(method, "cancellable", False)
-
-
def return_json_error(
f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig]
) -> None:
@@ -328,7 +267,7 @@ class HttpServer(Protocol):
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return either tuple of (code, response), or None.
- servlet_classname (str): The name of the handler to be used in prometheus
+ servlet_classname: The name of the handler to be used in prometheus
and opentracing logs.
"""
@@ -389,7 +328,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
method_handler = getattr(self, "_async_render_%s" % (request_method,), None)
if method_handler:
- request.is_render_cancellable = is_method_cancellable(method_handler)
+ request.is_render_cancellable = is_function_cancellable(method_handler)
raw_callback_return = method_handler(request)
@@ -401,7 +340,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
return callback_return
- _unrecognised_request_handler(request)
+ return _unrecognised_request_handler(request)
@abc.abstractmethod
def _send_response(
@@ -551,7 +490,7 @@ class JsonResource(DirectServeJsonResource):
async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
- request.is_render_cancellable = is_method_cancellable(callback)
+ request.is_render_cancellable = is_function_cancellable(callback)
# Make sure we have an appropriate name for this handler in prometheus
# (rather than the default of JsonResource).
@@ -660,7 +599,7 @@ class RootRedirect(resource.Resource):
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""
- def render_OPTIONS(self, request: Request) -> bytes:
+ def render_OPTIONS(self, request: SynapseRequest) -> bytes:
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")
@@ -767,7 +706,7 @@ class _ByteProducer:
self._request = None
-def _encode_json_bytes(json_object: Any) -> bytes:
+def _encode_json_bytes(json_object: object) -> bytes:
"""
Encode an object into JSON. Returns the encoded bytes.
"""
@@ -808,7 +747,7 @@ def respond_with_json(
return None
if canonical_json:
- encoder = encode_canonical_json
+ encoder: Callable[[object], bytes] = encode_canonical_json
else:
encoder = _encode_json_bytes
@@ -825,7 +764,7 @@ def respond_with_json(
def respond_with_json_bytes(
- request: Request,
+ request: SynapseRequest,
code: int,
json_bytes: bytes,
send_cors: bool = False,
@@ -921,7 +860,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
_ByteProducer(request, bytes_generator)
-def set_cors_headers(request: Request) -> None:
+def set_cors_headers(request: SynapseRequest) -> None:
"""Set the CORS headers so that javascript running in a web browsers can
use this API
@@ -932,10 +871,20 @@ def set_cors_headers(request: Request) -> None:
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
- request.setHeader(
- b"Access-Control-Allow-Headers",
- b"X-Requested-With, Content-Type, Authorization, Date",
- )
+ if request.experimental_cors_msc3886:
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
+ )
+ request.setHeader(
+ b"Access-Control-Expose-Headers",
+ b"ETag, Location, X-Max-Bytes",
+ )
+ else:
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date",
+ )
def set_corp_headers(request: Request) -> None:
@@ -1004,10 +953,25 @@ def set_clickjacking_protection_headers(request: Request) -> None:
request.setHeader(b"Content-Security-Policy", b"frame-ancestors 'none';")
-def respond_with_redirect(request: Request, url: bytes) -> None:
- """Write a 302 response to the request, if it is still alive."""
+def respond_with_redirect(
+ request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False
+) -> None:
+ """
+ Write a 302 (or other specified status code) response to the request, if it is still alive.
+
+ Args:
+ request: The http request to respond to.
+ url: The URL to redirect to.
+ statusCode: The HTTP status code to use for the redirect (defaults to 302).
+ cors: Whether to set CORS headers on the response.
+ """
logger.debug("Redirect to %s", url.decode("utf-8"))
- request.redirect(url)
+
+ if cors:
+ set_cors_headers(request)
+
+ request.setResponseCode(statusCode)
+ request.setHeader(b"location", url)
finish_request(request)
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 4ff840ca0e..dead02cd5c 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -23,14 +23,19 @@ from typing import (
Optional,
Sequence,
Tuple,
+ Type,
+ TypeVar,
overload,
)
+from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError
+from pydantic.error_wrappers import ErrorWrapper
from typing_extensions import Literal
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
+from synapse.http import redact_uri
from synapse.http.server import HttpServer
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder
@@ -660,7 +665,13 @@ def parse_json_value_from_request(
try:
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
- logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
+ logger.warning(
+ "Unable to parse JSON from %s %s response: %s (%s)",
+ request.method.decode("ascii", errors="replace"),
+ redact_uri(request.uri.decode("ascii", errors="replace")),
+ e,
+ content_bytes,
+ )
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Content not JSON.", errcode=Codes.NOT_JSON
)
@@ -694,6 +705,42 @@ def parse_json_object_from_request(
return content
+Model = TypeVar("Model", bound=BaseModel)
+
+
+def parse_and_validate_json_object_from_request(
+ request: Request, model_type: Type[Model]
+) -> Model:
+ """Parse a JSON object from the body of a twisted HTTP request, then deserialise and
+ validate using the given pydantic model.
+
+ Raises:
+ SynapseError if the request body couldn't be decoded as JSON or
+ if it wasn't a JSON object.
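+
+ Example usage (illustrative; `DeviceBody` is a hypothetical model, not part
+ of this module):
+
+     class DeviceBody(BaseModel):
+         device_id: str
+
+     body = parse_and_validate_json_object_from_request(request, DeviceBody)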
+ """
+ content = parse_json_object_from_request(request, allow_empty_body=False)
+ try:
+ instance = model_type.parse_obj(content)
+ except ValidationError as e:
+ # Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a
+ # more specific error if possible (which occasionally helps us to be spec-
+ # compliant). This is a bit awkward because the spec's error codes aren't very
+ # clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM.
+ errcode = Codes.BAD_JSON
+
+ raw_errors = e.raw_errors
+ if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper):
+ raw_error = raw_errors[0].exc
+ if isinstance(raw_error, MissingError):
+ errcode = Codes.MISSING_PARAM
+ elif isinstance(raw_error, PydanticValueError):
+ errcode = Codes.INVALID_PARAM
+
+ raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode)
+
+ return instance
+
+
def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None:
absent = []
for k in required:
diff --git a/synapse/http/site.py b/synapse/http/site.py
index eeec74b78a..6a1dbf7f33 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -72,14 +72,17 @@ class SynapseRequest(Request):
site: "SynapseSite",
*args: Any,
max_request_body_size: int = 1024,
+ request_id_header: Optional[str] = None,
**kw: Any,
):
super().__init__(channel, *args, **kw)
self._max_request_body_size = max_request_body_size
+ self.request_id_header = request_id_header
self.synapse_site = site
self.reactor = site.reactor
self._channel = channel # this is used by the tests
self.start_time = 0.0
+ self.experimental_cors_msc3886 = site.experimental_cors_msc3886
# The requester, if authenticated. For federation requests this is the
# server name, for client requests this is the Requester object.
@@ -172,7 +175,14 @@ class SynapseRequest(Request):
self._opentracing_span = span
def get_request_id(self) -> str:
- return "%s-%i" % (self.get_method(), self.request_seq)
+ request_id_value = None
+ if self.request_id_header:
+ request_id_value = self.getHeader(self.request_id_header)
+
+ if request_id_value is None:
+ request_id_value = str(self.request_seq)
+
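+ # e.g. with `request_id_header: "X-Request-Id"` configured and an incoming
+ # header value of "abc123", a GET request is logged as "GET-abc123";
+ # otherwise we fall back to the per-connection sequence number as before.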
+ return "%s-%s" % (self.get_method(), request_id_value)
def get_redacted_uri(self) -> str:
"""Gets the redacted URI associated with the request (or placeholder if the URI
@@ -226,7 +236,7 @@ class SynapseRequest(Request):
# If this is a request where the target user doesn't match the user who
# authenticated (e.g. and admin is puppetting a user) then we return both.
- if self._requester.user.to_string() != authenticated_entity:
+ if requester != authenticated_entity:
return requester, authenticated_entity
return requester, None
@@ -390,7 +400,7 @@ class SynapseRequest(Request):
be sure to call finished_processing.
Args:
- servlet_name (str): the name of the servlet which will be
+ servlet_name: the name of the servlet which will be
processing this request. This is used in the metrics.
It is possible to update this afterwards by updating
@@ -611,12 +621,17 @@ class SynapseSite(Site):
proxied = config.http_options.x_forwarded
request_class = XForwardedForRequest if proxied else SynapseRequest
+ request_id_header = config.http_options.request_id_header
+
+ self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886
+
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,
self,
max_request_body_size=max_request_body_size,
queued=queued,
+ request_id_header=request_id_header,
)
self.requestFactory = request_factory # type: ignore
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index fd9cb97920..f62bea968f 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -117,8 +117,7 @@ class ContextResourceUsage:
"""Create a new ContextResourceUsage
Args:
- copy_from (ContextResourceUsage|None): if not None, an object to
- copy stats from
+ copy_from: if not None, an object to copy stats from
"""
if copy_from is None:
self.reset()
@@ -162,7 +161,7 @@ class ContextResourceUsage:
"""Add another ContextResourceUsage's stats to this one's.
Args:
- other (ContextResourceUsage): the other resource usage object
+ other: the other resource usage object
"""
self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime
@@ -342,7 +341,7 @@ class LoggingContext:
called directly.
Returns:
- LoggingContext: the current logging context
+ The current logging context
"""
warnings.warn(
"synapse.logging.context.LoggingContext.current_context() is deprecated "
@@ -362,7 +361,8 @@ class LoggingContext:
called directly.
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -474,8 +474,7 @@ class LoggingContext:
"""Get resources used by this logcontext so far.
Returns:
- ContextResourceUsage: a *copy* of the object tracking resource
- usage so far
+ A *copy* of the object tracking resource usage so far
"""
# we always return a copy, for consistency
res = self._resource_usage.copy()
@@ -586,7 +585,7 @@ class LoggingContextFilter(logging.Filter):
True to include the record in the log output.
"""
context = current_context()
- record.request = self._default_request # type: ignore
+ record.request = self._default_request
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
@@ -594,21 +593,21 @@ class LoggingContextFilter(logging.Filter):
if context is not None:
# Logging is interested in the request ID. Note that for backwards
# compatibility this is stored as the "request" on the record.
- record.request = str(context) # type: ignore
+ record.request = str(context)
# Add some data from the HTTP request.
request = context.request
if request is None:
return True
- record.ip_address = request.ip_address # type: ignore
- record.site_tag = request.site_tag # type: ignore
- record.requester = request.requester # type: ignore
- record.authenticated_entity = request.authenticated_entity # type: ignore
- record.method = request.method # type: ignore
- record.url = request.url # type: ignore
- record.protocol = request.protocol # type: ignore
- record.user_agent = request.user_agent # type: ignore
+ record.ip_address = request.ip_address
+ record.site_tag = request.site_tag
+ record.requester = request.requester
+ record.authenticated_entity = request.authenticated_entity
+ record.method = request.method
+ record.url = request.url
+ record.protocol = request.protocol
+ record.user_agent = request.user_agent
return True
@@ -663,7 +662,8 @@ def current_context() -> LoggingContextOrSentinel:
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -700,7 +700,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
suffix: suffix to add to the parent context's 'name'.
Returns:
- LoggingContext: new logging context.
+ A new logging context.
"""
curr_context = current_context()
if not curr_context:
@@ -898,20 +898,19 @@ def defer_to_thread(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked, and whose threadpool we should use for the
- function.
+ reactor: The reactor in whose main thread the Deferred will be invoked,
+ and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
@@ -939,20 +938,20 @@ def defer_to_threadpool(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked. Normally this will be hs.get_reactor().
+ reactor: The reactor in whose main thread the Deferred will be invoked.
+ Normally this will be hs.get_reactor().
- threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
- running `f`. Normally this will be hs.get_reactor().getThreadPool().
+ threadpool: The threadpool to use for running `f`. Normally this will be
+ hs.get_reactor().getThreadPool().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index c1aa205eed..b69060854f 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -173,6 +173,7 @@ from typing import (
Any,
Callable,
Collection,
+ ContextManager,
Dict,
Generator,
Iterable,
@@ -202,6 +203,9 @@ if TYPE_CHECKING:
# Helper class
+# Matches the number suffix in an instance name like "matrix.org client_reader-8"
+STRIP_INSTANCE_NUMBER_SUFFIX_REGEX = re.compile(r"[_-]?\d+$")
+
class _DummyTagNames:
"""wrapper of opentracings tags. We need to have them if we
@@ -294,6 +298,8 @@ class SynapseTags:
# Whether the sync response has new data to be returned to the client.
SYNC_RESULT = "sync.new_data"
+ INSTANCE_NAME = "instance_name"
+
# incoming HTTP request ID (as written in the logs)
REQUEST_ID = "request_id"
@@ -309,6 +315,19 @@ class SynapseTags:
# The name of the external cache
CACHE_NAME = "cache.name"
+ # Used to tag function arguments
+ #
+ # Tag a named arg. The name of the argument should be appended to this prefix.
+ FUNC_ARG_PREFIX = "ARG."
+ # Tag extra variadic number of positional arguments (`def foo(first, second, *extras)`)
+ FUNC_ARGS = "args"
+ # Tag keyword args
+ FUNC_KWARGS = "kwargs"
+
+ # Some intermediate result that's interesting to the function. The label for
+ # the result should be appended to this prefix.
+ RESULT_PREFIX = "RESULT."
+
class SynapseBaggage:
FORCE_TRACING = "synapse-force-tracing"
@@ -427,9 +446,17 @@ def init_tracer(hs: "HomeServer") -> None:
from jaeger_client.metrics.prometheus import PrometheusMetricsFactory
+ # Instance names are opaque strings but by stripping off the number suffix,
+ # we can get something that looks like a "worker type", e.g.
+ # "client_reader-1" -> "client_reader" so we don't spread the traces across
+ # so many services.
+ instance_name_by_type = re.sub(
+ STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name()
+ )
+
config = JaegerConfig(
config=hs.config.tracing.jaeger_config,
- service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
+ service_name=f"{hs.config.server.server_name} {instance_name_by_type}",
scope_manager=LogContextScopeManager(),
metrics_factory=PrometheusMetricsFactory(),
)
@@ -694,7 +721,7 @@ def inject_header_dict(
destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist
- check_destination (bool): If false, destination will be ignored and the context
+ check_destination: If false, destination will be ignored and the context
will always be injected.
Note:
@@ -753,7 +780,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str
destination: the name of the remote server.
Returns:
- dict: the active span's context if opentracing is enabled, otherwise empty.
+ the active span's context if opentracing is enabled, otherwise empty.
"""
if destination and not whitelisted_homeserver(destination):
@@ -823,75 +850,117 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte
# Tracing decorators
-def trace_with_opname(opname: str) -> Callable[[Callable[P, R]], Callable[P, R]]:
+def _custom_sync_async_decorator(
+ func: Callable[P, R],
+ wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]],
+) -> Callable[P, R]:
"""
- Decorator to trace a function with a custom opname.
-
- See the module's doc string for usage examples.
+ Decorates a function that is sync or async (coroutines), or that returns a Twisted
+ `Deferred`. The custom business logic of the decorator goes in `wrapping_logic`.
+
+ Example usage:
+ ```py
+ # Decorator to time the function and log it out
+ def duration(func: Callable[P, R]) -> Callable[P, R]:
+ @contextlib.contextmanager
+ def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> Generator[None, None, None]:
+ start_ts = time.time()
+ try:
+ yield
+ finally:
+ end_ts = time.time()
+ duration = end_ts - start_ts
+ logger.info("%s took %s seconds", func.__name__, duration)
+ return _custom_sync_async_decorator(func, _wrapping_logic)
+ ```
+ Args:
+ func: The function to be decorated
+ wrapping_logic: The business logic of your custom decorator.
+ This should be a ContextManager so you are able to run your logic
+ before/after the function as desired.
"""
- def decorator(func: Callable[P, R]) -> Callable[P, R]:
- if opentracing is None:
- return func # type: ignore[unreachable]
+ if inspect.iscoroutinefunction(func):
- if inspect.iscoroutinefunction(func):
+ @wraps(func)
+ async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ with wrapping_logic(func, *args, **kwargs):
+ return await func(*args, **kwargs) # type: ignore[misc]
- @wraps(func)
- async def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R:
- with start_active_span(opname):
- return await func(*args, **kwargs) # type: ignore[misc]
+ else:
+ # The other case here handles both sync functions and those
+ # decorated with `defer.inlineCallbacks` (which return Deferreds).
+ @wraps(func)
+ def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ scope = wrapping_logic(func, *args, **kwargs)
+ scope.__enter__()
- else:
- # The other case here handles both sync functions and those
- # decorated with inlineDeferred.
- @wraps(func)
- def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R:
- scope = start_active_span(opname)
- scope.__enter__()
-
- try:
- result = func(*args, **kwargs)
- if isinstance(result, defer.Deferred):
-
- def call_back(result: R) -> R:
- scope.__exit__(None, None, None)
- return result
-
- def err_back(result: R) -> R:
- scope.__exit__(None, None, None)
- return result
-
- result.addCallbacks(call_back, err_back)
-
- else:
- if inspect.isawaitable(result):
- logger.error(
- "@trace may not have wrapped %s correctly! "
- "The function is not async but returned a %s.",
- func.__qualname__,
- type(result).__name__,
- )
+ try:
+ result = func(*args, **kwargs)
+ if isinstance(result, defer.Deferred):
+ def call_back(result: R) -> R:
scope.__exit__(None, None, None)
+ return result
- return result
+ def err_back(result: R) -> R:
+ scope.__exit__(None, None, None)
+ return result
+
+ result.addCallbacks(call_back, err_back)
+
+ else:
+ if inspect.isawaitable(result):
+ logger.error(
+ "@trace may not have wrapped %s correctly! "
+ "The function is not async but returned a %s.",
+ func.__qualname__,
+ type(result).__name__,
+ )
- except Exception as e:
- scope.__exit__(type(e), None, e.__traceback__)
- raise
+ scope.__exit__(None, None, None)
- return _trace_inner # type: ignore[return-value]
+ return result
- return decorator
+ except Exception as e:
+ scope.__exit__(type(e), None, e.__traceback__)
+ raise
+
+ return _wrapper # type: ignore[return-value]
+
+
+def trace_with_opname(
+ opname: str,
+ *,
+ tracer: Optional["opentracing.Tracer"] = None,
+) -> Callable[[Callable[P, R]], Callable[P, R]]:
+ """
+ Decorator to trace a function with a custom opname.
+ See the module's doc string for usage examples.
+ """
+
+ # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909
+ @contextlib.contextmanager # type: ignore[arg-type]
+ def _wrapping_logic(
+ func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
+ ) -> Generator[None, None, None]:
+ with start_active_span(opname, tracer=tracer):
+ yield
+
+ def _decorator(func: Callable[P, R]) -> Callable[P, R]:
+ if not opentracing:
+ return func
+
+ return _custom_sync_async_decorator(func, _wrapping_logic)
+
+ return _decorator
def trace(func: Callable[P, R]) -> Callable[P, R]:
"""
Decorator to trace a function.
-
Sets the operation name to that of the function's name.
-
See the module's doc string for usage examples.
"""
@@ -900,22 +969,36 @@ def trace(func: Callable[P, R]) -> Callable[P, R]:
def tag_args(func: Callable[P, R]) -> Callable[P, R]:
"""
- Tags all of the args to the active span.
+ Decorator to tag all of the args to the active span.
+
+ Args:
+ func: `func` is assumed to be a method taking a `self` parameter, or a
+ `classmethod` taking a `cls` parameter. In either case, a tag is not
+ created for this parameter.
"""
if not opentracing:
return func
- @wraps(func)
- def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R:
+ # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909
+ @contextlib.contextmanager # type: ignore[arg-type]
+ def _wrapping_logic(
+ func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
+ ) -> Generator[None, None, None]:
argspec = inspect.getfullargspec(func)
- for i, arg in enumerate(argspec.args[1:]):
- set_tag("ARG_" + arg, str(args[i])) # type: ignore[index]
- set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index]
- set_tag("kwargs", str(kwargs))
- return func(*args, **kwargs)
+ # We use `[1:]` to skip the `self` object reference and `start=1` to
+ # make the index line up with `argspec.args`.
+ #
+ # FIXME: We could update this to handle any type of function by ignoring the
+ # first argument only if it's named `self` or `cls`. This isn't fool-proof
+ # but handles the idiomatic cases.
+ for i, arg in enumerate(args[1:], start=1):
+ set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
+ set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
+ set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
+ yield
- return _tag_args_inner
+ return _custom_sync_async_decorator(func, _wrapping_logic)
@contextlib.contextmanager
@@ -962,11 +1045,11 @@ def trace_servlet(
# with JsonResource).
scope.span.set_operation_name(request.request_metrics.name)
- # set the tags *after* the servlet completes, in case it decided to
- # prioritise the span (tags will get dropped on unprioritised spans)
request_tags[
SynapseTags.REQUEST_TAG
] = request.request_metrics.start_context.tag
+ # set the tags *after* the servlet completes, in case it decided to
+ # prioritise the span (tags will get dropped on unprioritised spans)
for k, v in request_tags.items():
scope.span.set_tag(k, v)
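
As a reading aid for the tracing refactor above: the new decorator machinery drives both async and sync call paths from a single context manager (the `_wrapping_logic` passed to `_custom_sync_async_decorator`). Below is a minimal, standalone sketch of that pattern with illustrative names; it deliberately omits the Deferred callback handling the real helper performs for `inlineCallbacks`-style functions.

import contextlib
import functools
import inspect
from typing import Any, Callable, Generator


def custom_sync_async_decorator(
    func: Callable[..., Any],
    wrapping_logic: Callable[..., Any],
) -> Callable[..., Any]:
    if inspect.iscoroutinefunction(func):
        # Async functions: hold the context manager open across the await.
        @functools.wraps(func)
        async def _wrapper(*args: Any, **kwargs: Any) -> Any:
            with wrapping_logic(func, *args, **kwargs):
                return await func(*args, **kwargs)

    else:
        # Sync functions: enter before the call, exit after it returns or raises.
        @functools.wraps(func)
        def _wrapper(*args: Any, **kwargs: Any) -> Any:
            with wrapping_logic(func, *args, **kwargs):
                return func(*args, **kwargs)

    return _wrapper


@contextlib.contextmanager
def _log_call(func: Callable[..., Any], *args: Any, **kwargs: Any) -> Generator[None, None, None]:
    # Stand-in for the start_active_span()/tag-setting logic used by the real decorators.
    print(f"entering {func.__qualname__}")
    try:
        yield
    finally:
        print(f"leaving {func.__qualname__}")


def traced(func: Callable[..., Any]) -> Callable[..., Any]:
    return custom_sync_async_decorator(func, _log_call)
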
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 496fce2ecc..b01372565d 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -46,12 +46,8 @@ from twisted.python.threadpool import ThreadPool
# This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics # noqa: F401
-from synapse.metrics._exposition import (
- MetricsResource,
- generate_latest,
- start_http_server,
-)
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
+from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION
@@ -474,7 +470,6 @@ __all__ = [
"Collector",
"MetricsResource",
"generate_latest",
- "start_http_server",
"LaterGauge",
"InFlightGauge",
"GaugeBucketCollector",
diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py
deleted file mode 100644
index 353d0a63b6..0000000000
--- a/synapse/metrics/_exposition.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2015-2019 Prometheus Python Client Developers
-# Copyright 2019 Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This code is based off `prometheus_client/exposition.py` from version 0.7.1.
-
-Due to the renaming of metrics in prometheus_client 0.4.0, this customised
-vendoring of the code will emit both the old versions that Synapse dashboards
-expect, and the newer "best practice" version of the up-to-date official client.
-"""
-
-import math
-import threading
-from http.server import BaseHTTPRequestHandler, HTTPServer
-from socketserver import ThreadingMixIn
-from typing import Any, Dict, List, Type, Union
-from urllib.parse import parse_qs, urlparse
-
-from prometheus_client import REGISTRY, CollectorRegistry
-from prometheus_client.core import Sample
-
-from twisted.web.resource import Resource
-from twisted.web.server import Request
-
-from synapse.util import caches
-
-CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
-
-
-def floatToGoString(d: Union[int, float]) -> str:
- d = float(d)
- if d == math.inf:
- return "+Inf"
- elif d == -math.inf:
- return "-Inf"
- elif math.isnan(d):
- return "NaN"
- else:
- s = repr(d)
- dot = s.find(".")
- # Go switches to exponents sooner than Python.
- # We only need to care about positive values for le/quantile.
- if d > 0 and dot > 6:
- mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
- return f"{mantissa}e+0{dot - 1}"
- return s
-
-
-def sample_line(line: Sample, name: str) -> str:
- if line.labels:
- labelstr = "{{{0}}}".format(
- ",".join(
- [
- '{}="{}"'.format(
- k,
- v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
- )
- for k, v in sorted(line.labels.items())
- ]
- )
- )
- else:
- labelstr = ""
- timestamp = ""
- if line.timestamp is not None:
- # Convert to milliseconds.
- timestamp = f" {int(float(line.timestamp) * 1000):d}"
- return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
-
-
-def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
-
- # Trigger the cache metrics to be rescraped, which updates the common
- # metrics but do not produce metrics themselves
- for collector in caches.collectors_by_name.values():
- collector.collect()
-
- output = []
-
- for metric in registry.collect():
- if not metric.samples:
- # No samples, don't bother.
- continue
-
- mname = metric.name
- mnewname = metric.name
- mtype = metric.type
-
- # OpenMetrics -> Prometheus
- if mtype == "counter":
- mnewname = mnewname + "_total"
- elif mtype == "info":
- mtype = "gauge"
- mnewname = mnewname + "_info"
- elif mtype == "stateset":
- mtype = "gauge"
- elif mtype == "gaugehistogram":
- mtype = "histogram"
- elif mtype == "unknown":
- mtype = "untyped"
-
- # Output in the old format for compatibility.
- if emit_help:
- output.append(
- "# HELP {} {}\n".format(
- mname,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {mname} {mtype}\n")
-
- om_samples: Dict[str, List[str]] = {}
- for s in metric.samples:
- for suffix in ["_created", "_gsum", "_gcount"]:
- if s.name == metric.name + suffix:
- # OpenMetrics specific sample, put in a gauge at the end.
- # (these come from gaugehistograms which don't get renamed,
- # so no need to faff with mnewname)
- om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
- break
- else:
- newname = s.name.replace(mnewname, mname)
- if ":" in newname and newname.endswith("_total"):
- newname = newname[: -len("_total")]
- output.append(sample_line(s, newname))
-
- for suffix, lines in sorted(om_samples.items()):
- if emit_help:
- output.append(
- "# HELP {}{} {}\n".format(
- metric.name,
- suffix,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {metric.name}{suffix} gauge\n")
- output.extend(lines)
-
- # Get rid of the weird colon things while we're at it
- if mtype == "counter":
- mnewname = mnewname.replace(":total", "")
- mnewname = mnewname.replace(":", "_")
-
- if mname == mnewname:
- continue
-
- # Also output in the new format, if it's different.
- if emit_help:
- output.append(
- "# HELP {} {}\n".format(
- mnewname,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {mnewname} {mtype}\n")
-
- for s in metric.samples:
- # Get rid of the OpenMetrics specific samples (we should already have
- # dealt with them above anyway.)
- for suffix in ["_created", "_gsum", "_gcount"]:
- if s.name == metric.name + suffix:
- break
- else:
- output.append(
- sample_line(s, s.name.replace(":total", "").replace(":", "_"))
- )
-
- return "".join(output).encode("utf-8")
-
-
-class MetricsHandler(BaseHTTPRequestHandler):
- """HTTP handler that gives metrics from ``REGISTRY``."""
-
- registry = REGISTRY
-
- def do_GET(self) -> None:
- registry = self.registry
- params = parse_qs(urlparse(self.path).query)
-
- if "help" in params:
- emit_help = True
- else:
- emit_help = False
-
- try:
- output = generate_latest(registry, emit_help=emit_help)
- except Exception:
- self.send_error(500, "error generating metric output")
- raise
- self.send_response(200)
- self.send_header("Content-Type", CONTENT_TYPE_LATEST)
- self.send_header("Content-Length", str(len(output)))
- self.end_headers()
- self.wfile.write(output)
-
- def log_message(self, format: str, *args: Any) -> None:
- """Log nothing."""
-
- @classmethod
- def factory(cls, registry: CollectorRegistry) -> Type:
- """Returns a dynamic MetricsHandler class tied
- to the passed registry.
- """
- # This implementation relies on MetricsHandler.registry
- # (defined above and defaulted to REGISTRY).
-
- # As we have unicode_literals, we need to create a str()
- # object for type().
- cls_name = str(cls.__name__)
- MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
- return MyMetricsHandler
-
-
-class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
- """Thread per request HTTP server."""
-
- # Make worker threads "fire and forget". Beginning with Python 3.7 this
- # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
- # non-daemon threads in a list in order to join on them at server close.
- # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
- # same as Python 3.7's ``ThreadingHTTPServer``.
- daemon_threads = True
-
-
-def start_http_server(
- port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
-) -> None:
- """Starts an HTTP server for prometheus metrics as a daemon thread"""
- CustomMetricsHandler = MetricsHandler.factory(registry)
- httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
- t = threading.Thread(target=httpd.serve_forever)
- t.daemon = True
- t.start()
-
-
-class MetricsResource(Resource):
- """
- Twisted ``Resource`` that serves prometheus metrics.
- """
-
- isLeaf = True
-
- def __init__(self, registry: CollectorRegistry = REGISTRY):
- self.registry = registry
-
- def render_GET(self, request: Request) -> bytes:
- request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
- response = generate_latest(self.registry)
- request.setHeader(b"Content-Length", str(len(response)))
- return response
diff --git a/synapse/metrics/_twisted_exposition.py b/synapse/metrics/_twisted_exposition.py
new file mode 100644
index 0000000000..0abcd14953
--- /dev/null
+++ b/synapse/metrics/_twisted_exposition.py
@@ -0,0 +1,38 @@
+# Copyright 2015-2019 Prometheus Python Client Developers
+# Copyright 2019 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from prometheus_client import REGISTRY, CollectorRegistry, generate_latest
+
+from twisted.web.resource import Resource
+from twisted.web.server import Request
+
+CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
+
+
+class MetricsResource(Resource):
+ """
+ Twisted ``Resource`` that serves prometheus metrics.
+ """
+
+ isLeaf = True
+
+ def __init__(self, registry: CollectorRegistry = REGISTRY):
+ self.registry = registry
+
+ def render_GET(self, request: Request) -> bytes:
+ request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
+ response = generate_latest(self.registry)
+ request.setHeader(b"Content-Length", str(len(response)))
+ return response
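
A minimal sketch of serving the new `MetricsResource` from a standalone Twisted site (the port and mount path are illustrative; within Synapse this is wired through its own listener configuration rather than a hand-rolled reactor):

from prometheus_client import REGISTRY
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

from synapse.metrics._twisted_exposition import MetricsResource

root = Resource()
root.putChild(b"metrics", MetricsResource(REGISTRY))  # serve GET /metrics
reactor.listenTCP(9100, Site(root))
reactor.run()
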
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 7a1516d3a8..9ea4e23b31 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -174,8 +174,10 @@ class _BackgroundProcess:
diff = new_stats - self._reported_stats
self._reported_stats = new_stats
- _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
- _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+ # For unknown reasons, the difference in times can be negative. See comment in
+ # synapse.http.request_metrics.RequestMetrics.update_metrics.
+ _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0))
+ _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0))
_background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
_background_process_db_txn_duration.labels(self.desc).inc(
diff.db_txn_duration_sec
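
Background for the clamping above: `prometheus_client` counters only accept non-negative increments (`Counter.inc` raises `ValueError` otherwise), so the occasionally negative CPU-time delta has to be floored at zero before being applied. A tiny illustrative sketch:

from prometheus_client import Counter

# Illustrative counter, not one of Synapse's real metrics.
example_ru_utime = Counter("example_background_ru_utime", "user CPU time delta example")

diff_ru_utime = -0.002  # the measured delta can, rarely, come out negative
example_ru_utime.inc(max(diff_ru_utime, 0))  # increments by 0 instead of raising
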
diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py
new file mode 100644
index 0000000000..0a22ea3d92
--- /dev/null
+++ b/synapse/metrics/common_usage_metrics.py
@@ -0,0 +1,79 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+import attr
+
+from synapse.metrics.background_process_metrics import run_as_background_process
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+from prometheus_client import Gauge
+
+# Gauge to expose daily active users metrics
+current_dau_gauge = Gauge(
+ "synapse_admin_daily_active_users",
+ "Current daily active users count",
+)
+
+
+@attr.s(auto_attribs=True)
+class CommonUsageMetrics:
+ """Usage metrics shared between the phone home stats and the prometheus exporter."""
+
+ daily_active_users: int
+
+
+class CommonUsageMetricsManager:
+ """Collects common usage metrics."""
+
+ def __init__(self, hs: "HomeServer") -> None:
+ self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
+
+ async def get_metrics(self) -> CommonUsageMetrics:
+ """Get the CommonUsageMetrics object. If no collection has happened yet, do it
+ before returning the metrics.
+
+ Returns:
+ The CommonUsageMetrics object to read common metrics from.
+ """
+ return await self._collect()
+
+ async def setup(self) -> None:
+ """Keep the gauges for common usage metrics up to date."""
+ await self._update_gauges()
+ self._clock.looping_call(
+ run_as_background_process,
+ 5 * 60 * 1000,
+ desc="common_usage_metrics_update_gauges",
+ func=self._update_gauges,
+ )
+
+ async def _collect(self) -> CommonUsageMetrics:
+ """Collect the common metrics and either create the CommonUsageMetrics object to
+ use if it doesn't exist yet, or update it.
+ """
+ dau_count = await self._store.count_daily_users()
+
+ return CommonUsageMetrics(
+ daily_active_users=dau_count,
+ )
+
+ async def _update_gauges(self) -> None:
+ """Update the Prometheus gauges."""
+ metrics = await self._collect()
+
+ current_dau_gauge.set(float(metrics.daily_active_users))
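
A hedged sketch of reading these shared metrics from elsewhere in the server, assuming the homeserver exposes the manager through a getter such as `get_common_usage_metrics_manager()` (that wiring is not part of this hunk):

async def report_dau(hs: "HomeServer") -> None:
    # Hypothetical call site: fetch the shared usage metrics and print the DAU count.
    manager = hs.get_common_usage_metrics_manager()
    metrics = await manager.get_metrics()
    print("daily active users:", metrics.daily_active_users)
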
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 778608280f..38c71b8b43 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -87,6 +87,7 @@ from synapse.handlers.auth import (
ON_LOGGED_OUT_CALLBACK,
AuthHandler,
)
+from synapse.handlers.device import DeviceHandler
from synapse.handlers.push_rules import RuleSpec, check_actions
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
@@ -126,7 +127,7 @@ from synapse.types import (
)
from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import CachedFunction, cached
from synapse.util.frozenutils import freeze
if TYPE_CHECKING:
@@ -208,6 +209,7 @@ class ModuleApi:
self._registration_handler = hs.get_registration_handler()
self._send_email_handler = hs.get_send_email_handler()
self._push_rules_handler = hs.get_push_rules_handler()
+ self._device_handler = hs.get_device_handler()
self.custom_template_dir = hs.config.server.custom_template_directory
try:
@@ -751,16 +753,16 @@ class ModuleApi:
)
)
- def generate_short_term_login_token(
+ async def create_login_token(
self,
user_id: str,
duration_in_ms: int = (2 * 60 * 1000),
- auth_provider_id: str = "",
+ auth_provider_id: Optional[str] = None,
auth_provider_session_id: Optional[str] = None,
) -> str:
- """Generate a login token suitable for m.login.token authentication
+ """Create a login token suitable for m.login.token authentication
- Added in Synapse v1.9.0.
+ Added in Synapse v1.69.0.
Args:
user_id: gives the ID of the user that the token is for
@@ -768,14 +770,17 @@ class ModuleApi:
duration_in_ms: the time that the token will be valid for
auth_provider_id: the ID of the SSO IdP that the user used to authenticate
- to get this token, if any. This is encoded in the token so that
- /login can report stats on number of successful logins by IdP.
+ to get this token, if any. This is encoded in the token so that
+ /login can report stats on number of successful logins by IdP.
+
+ auth_provider_session_id: The session ID obtained during login from the SSO IdP,
+ if any.
"""
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
+ return await self._hs.get_auth_handler().create_login_token_for_user_id(
user_id,
+ duration_in_ms,
auth_provider_id,
auth_provider_session_id,
- duration_in_ms,
)
@defer.inlineCallbacks
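
A sketch of a module calling the renamed API (the module class and its `api` attribute are illustrative scaffolding, not part of this diff):

class MyTokenIssuingModule:
    def __init__(self, config: dict, api: "ModuleApi") -> None:
        self.api = api

    async def issue_token(self, user_id: str) -> str:
        # Create an m.login.token login token valid for two minutes.
        return await self.api.create_login_token(user_id, duration_in_ms=2 * 60 * 1000)
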
@@ -784,10 +789,12 @@ class ModuleApi:
) -> Generator["defer.Deferred[Any]", Any, None]:
"""Invalidate an access token for a user
+ Can only be called from the main process.
+
Added in Synapse v0.25.0.
Args:
- access_token(str): access token
+ access_token: access token
Returns:
twisted.internet.defer.Deferred - resolves once the access token
@@ -796,6 +803,10 @@ class ModuleApi:
Raises:
synapse.api.errors.AuthError: the access token is invalid
"""
+ assert isinstance(
+ self._device_handler, DeviceHandler
+ ), "invalidate_access_token can only be called on the main process"
+
# see if the access token corresponds to a device
user_info = yield defer.ensureDeferred(
self._auth.get_user_by_access_token(access_token)
@@ -805,7 +816,7 @@ class ModuleApi:
if device_id:
# delete the device, which will also delete its access tokens
yield defer.ensureDeferred(
- self._hs.get_device_handler().delete_devices(user_id, [device_id])
+ self._device_handler.delete_devices(user_id, [device_id])
)
else:
# no associated device. Just delete the access token.
@@ -832,36 +843,46 @@ class ModuleApi:
**kwargs: named args to be passed to func
Returns:
- Deferred[object]: result of func
+ Result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type]
)
- def complete_sso_login(
- self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
- ) -> None:
- """Complete a SSO login by redirecting the user to a page to confirm whether they
- want their access token sent to `client_redirect_url`, or redirect them to that
- URL with a token directly if the URL matches with one of the whitelisted clients.
-
- This is deprecated in favor of complete_sso_login_async.
+ def register_cached_function(self, cached_func: CachedFunction) -> None:
+ """Register a cached function that should be invalidated across workers.
+ Invalidation local to a worker can be done directly using `cached_func.invalidate`;
+ invalidation that needs to reach other workers must call `invalidate_cache`
+ on the module API instead.
- Added in Synapse v1.11.1.
+ Added in Synapse v1.69.0.
Args:
- registered_user_id: The MXID that has been registered as a previous step of
- of this SSO login.
- request: The request to respond to.
- client_redirect_url: The URL to which to offer to redirect the user (or to
- redirect them directly if whitelisted).
+ cached_func: The cached function that will be registered to receive invalidation
+ locally and from other workers.
"""
- self._auth_handler._complete_sso_login(
- registered_user_id,
- "<unknown>",
- request,
- client_redirect_url,
+ self._store.register_external_cached_function(
+ f"{cached_func.__module__}.{cached_func.__name__}", cached_func
+ )
+
+ async def invalidate_cache(
+ self, cached_func: CachedFunction, keys: Tuple[Any, ...]
+ ) -> None:
+ """Invalidate a cache entry of a cached function across workers. The cached function
+ needs to be registered on all workers first with `register_cached_function`.
+
+ Added in Synapse v1.69.0.
+
+ Args:
+ cached_func: The cached function whose entry needs invalidating
+ keys: keys of the entry to invalidate, usually matching the arguments of the
+ cached function.
+ """
+ cached_func.invalidate(keys)
+ await self._store.send_invalidation_to_replication(
+ f"{cached_func.__module__}.{cached_func.__name__}",
+ keys,
)
async def complete_sso_login_async(
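
A sketch of the intended module-side flow for the two new cache methods, assuming `cached` is importable from `synapse.module_api` (the re-exported descriptor suggested by the import change above) and with `_expensive_lookup` standing in for whatever the module actually computes:

from synapse.module_api import ModuleApi, cached


class MyCachingModule:
    def __init__(self, config: dict, api: ModuleApi) -> None:
        self.api = api
        # Register so that invalidations from other workers reach this cache too.
        self.api.register_cached_function(self.get_thing)

    @cached()
    async def get_thing(self, key: str) -> str:
        return await self._expensive_lookup(key)  # hypothetical helper

    async def on_thing_changed(self, key: str) -> None:
        # Invalidate locally and broadcast the invalidation to other workers.
        await self.api.invalidate_cache(self.get_thing, (key,))
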
@@ -914,8 +935,7 @@ class ModuleApi:
to represent 'any') of the room state to acquire.
Returns:
- twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
- The filtered state events in the room.
+ The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids(
@@ -932,10 +952,12 @@ class ModuleApi:
room_id: str,
new_membership: str,
content: Optional[JsonDict] = None,
+ remote_room_hosts: Optional[List[str]] = None,
) -> EventBase:
"""Updates the membership of a user to the given value.
Added in Synapse v1.46.0.
+ Changed in Synapse v1.65.0: Added the 'remote_room_hosts' parameter.
Args:
sender: The user performing the membership change. Must be a user local to
@@ -949,6 +971,7 @@ class ModuleApi:
https://spec.matrix.org/unstable/client-server-api/#mroommember for the
list of allowed values.
content: Additional values to include in the resulting event's content.
+ remote_room_hosts: Remote servers to use for remote joins/knocks/etc.
Returns:
The newly created membership event.
@@ -1008,15 +1031,12 @@ class ModuleApi:
room_id=room_id,
action=new_membership,
content=content,
+ remote_room_hosts=remote_room_hosts,
)
# Try to retrieve the resulting event.
event = await self._hs.get_datastores().main.get_event(event_id)
- # update_membership is supposed to always return after the event has been
- # successfully persisted.
- assert event is not None
-
return event
async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase:
@@ -1479,6 +1499,57 @@ class ModuleApi:
return room_id.to_string(), hosts
+ async def create_room(
+ self,
+ user_id: str,
+ config: JsonDict,
+ ratelimit: bool = True,
+ creator_join_profile: Optional[JsonDict] = None,
+ ) -> Tuple[str, Optional[str]]:
+ """Creates a new room.
+
+ Added in Synapse v1.65.0.
+
+ Args:
+ user_id:
+ The user who requested the room creation.
+ config: A dict of configuration options. See "Request body" of:
+ https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
+ ratelimit: set to False to disable the rate limiter for this specific operation.
+
+ creator_join_profile:
+ Set to override the displayname and avatar for the creating
+ user in this room. If unset, displayname and avatar will be
+ derived from the user's profile. If set, should contain the
+ values to go in the body of the 'join' event (typically
+ `avatar_url` and/or `displayname`).
+
+ Returns:
+ A tuple containing: 1) the room ID (str), and 2) the room alias (str) if one
+ was requested, otherwise None.
+
+ Raises:
+ ResourceLimitError if the server is blocked due to a resource limit
+ being exceeded.
+ RuntimeError if the user_id does not refer to a local user.
+ SynapseError if the user_id is invalid, room ID couldn't be stored, or
+ something went horribly wrong.
+ """
+ if not self.is_mine(user_id):
+ raise RuntimeError(
+ "Tried to create a room as a user that isn't local to this homeserver",
+ )
+
+ requester = create_requester(user_id)
+ room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room(
+ requester=requester,
+ config=config,
+ ratelimit=ratelimit,
+ creator_join_profile=creator_join_profile,
+ )
+
+ return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None)
+
class PublicRoomListManager:
"""Contains methods for adding to, removing from and querying whether a room
diff --git a/synapse/notifier.py b/synapse/notifier.py
index c42bb8266a..26b97cf766 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -294,35 +294,31 @@ class Notifier:
"""
self._new_join_in_room_callbacks.append(cb)
- async def on_new_room_event(
+ async def on_new_room_events(
self,
- event: EventBase,
- event_pos: PersistedEventPosition,
+ events_and_pos: List[Tuple[EventBase, PersistedEventPosition]],
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
) -> None:
- """Unwraps event and calls `on_new_room_event_args`."""
- await self.on_new_room_event_args(
- event_pos=event_pos,
- room_id=event.room_id,
- event_id=event.event_id,
- event_type=event.type,
- state_key=event.get("state_key"),
- membership=event.content.get("membership"),
- max_room_stream_token=max_room_stream_token,
- extra_users=extra_users or [],
- )
+ """Creates a _PendingRoomEventEntry for each of the listed events and calls
+ notify_new_room_events with the results."""
+ event_entries = []
+ for event, pos in events_and_pos:
+ entry = self.create_pending_room_event_entry(
+ pos,
+ extra_users,
+ event.room_id,
+ event.type,
+ event.get("state_key"),
+ event.content.get("membership"),
+ )
+ event_entries.append((entry, event.event_id))
+ await self.notify_new_room_events(event_entries, max_room_stream_token)
- async def on_new_room_event_args(
+ async def notify_new_room_events(
self,
- room_id: str,
- event_id: str,
- event_type: str,
- state_key: Optional[str],
- membership: Optional[str],
- event_pos: PersistedEventPosition,
+ event_entries: List[Tuple[_PendingRoomEventEntry, str]],
max_room_stream_token: RoomStreamToken,
- extra_users: Optional[Collection[UserID]] = None,
) -> None:
"""Used by handlers to inform the notifier something has happened
in the room, room event wise.
@@ -338,22 +334,33 @@ class Notifier:
until all previous events have been persisted before notifying
the client streams.
"""
- self.pending_new_room_events.append(
- _PendingRoomEventEntry(
- event_pos=event_pos,
- extra_users=extra_users or [],
- room_id=room_id,
- type=event_type,
- state_key=state_key,
- membership=membership,
- )
- )
- self._notify_pending_new_room_events(max_room_stream_token)
+ for event_entry, event_id in event_entries:
+ self.pending_new_room_events.append(event_entry)
+ await self._third_party_rules.on_new_event(event_id)
- await self._third_party_rules.on_new_event(event_id)
+ self._notify_pending_new_room_events(max_room_stream_token)
self.notify_replication()
+ def create_pending_room_event_entry(
+ self,
+ event_pos: PersistedEventPosition,
+ extra_users: Optional[Collection[UserID]],
+ room_id: str,
+ event_type: str,
+ state_key: Optional[str],
+ membership: Optional[str],
+ ) -> _PendingRoomEventEntry:
+ """Creates and returns a _PendingRoomEventEntry"""
+ return _PendingRoomEventEntry(
+ event_pos=event_pos,
+ extra_users=extra_users or [],
+ room_id=room_id,
+ type=event_type,
+ state_key=state_key,
+ membership=membership,
+ )
+
def _notify_pending_new_room_events(
self, max_room_stream_token: RoomStreamToken
) -> None:
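
For illustration, the call shape at a persistence call site after this change might look like the following sketch (`persisted_events` and `max_token` are assumed to be in hand):

await notifier.on_new_room_events(
    [(event, pos) for event, pos in persisted_events],  # List[Tuple[EventBase, PersistedEventPosition]]
    max_room_stream_token=max_token,
    extra_users=[],
)
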
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index 57c4d70466..a0c760239d 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -116,6 +116,8 @@ class PusherConfig:
last_stream_ordering: int
last_success: Optional[int]
failing_since: Optional[int]
+ enabled: bool
+ device_id: Optional[str]
def as_dict(self) -> Dict[str, Any]:
"""Information that can be retrieved about a pusher after creation."""
@@ -128,6 +130,8 @@ class PusherConfig:
"lang": self.lang,
"profile_tag": self.profile_tag,
"pushkey": self.pushkey,
+ "enabled": self.enabled,
+ "device_id": self.device_id,
}
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
deleted file mode 100644
index 6c0cc5a6ce..0000000000
--- a/synapse/push/baserules.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd
-# Copyright 2019 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-from typing import Any, Dict, List
-
-from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
-
-
-def list_with_base_rules(rawrules: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """Combine the list of rules set by the user with the default push rules
-
- Args:
- rawrules: The rules the user has modified or set.
-
- Returns:
- A new list with the rules set by the user combined with the defaults.
- """
- ruleslist = []
-
- # Grab the base rules that the user has modified.
- # The modified base rules have a priority_class of -1.
- modified_base_rules = {r["rule_id"]: r for r in rawrules if r["priority_class"] < 0}
-
- # Remove the modified base rules from the list, They'll be added back
- # in the default positions in the list.
- rawrules = [r for r in rawrules if r["priority_class"] >= 0]
-
- # shove the server default rules for each kind onto the end of each
- current_prio_class = list(PRIORITY_CLASS_INVERSE_MAP)[-1]
-
- ruleslist.extend(
- make_base_prepend_rules(
- PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
- )
- )
-
- for r in rawrules:
- if r["priority_class"] < current_prio_class:
- while r["priority_class"] < current_prio_class:
- ruleslist.extend(
- make_base_append_rules(
- PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
- modified_base_rules,
- )
- )
- current_prio_class -= 1
- if current_prio_class > 0:
- ruleslist.extend(
- make_base_prepend_rules(
- PRIORITY_CLASS_INVERSE_MAP[current_prio_class],
- modified_base_rules,
- )
- )
-
- ruleslist.append(r)
-
- while current_prio_class > 0:
- ruleslist.extend(
- make_base_append_rules(
- PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
- )
- )
- current_prio_class -= 1
- if current_prio_class > 0:
- ruleslist.extend(
- make_base_prepend_rules(
- PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules
- )
- )
-
- return ruleslist
-
-
-def make_base_append_rules(
- kind: str, modified_base_rules: Dict[str, Dict[str, Any]]
-) -> List[Dict[str, Any]]:
- rules = []
-
- if kind == "override":
- rules = BASE_APPEND_OVERRIDE_RULES
- elif kind == "underride":
- rules = BASE_APPEND_UNDERRIDE_RULES
- elif kind == "content":
- rules = BASE_APPEND_CONTENT_RULES
-
- # Copy the rules before modifying them
- rules = copy.deepcopy(rules)
- for r in rules:
- # Only modify the actions, keep the conditions the same.
- assert isinstance(r["rule_id"], str)
- modified = modified_base_rules.get(r["rule_id"])
- if modified:
- r["actions"] = modified["actions"]
-
- return rules
-
-
-def make_base_prepend_rules(
- kind: str,
- modified_base_rules: Dict[str, Dict[str, Any]],
-) -> List[Dict[str, Any]]:
- rules = []
-
- if kind == "override":
- rules = BASE_PREPEND_OVERRIDE_RULES
-
- # Copy the rules before modifying them
- rules = copy.deepcopy(rules)
- for r in rules:
- # Only modify the actions, keep the conditions the same.
- assert isinstance(r["rule_id"], str)
- modified = modified_base_rules.get(r["rule_id"])
- if modified:
- r["actions"] = modified["actions"]
-
- return rules
-
-
-# We have to annotate these types, otherwise mypy infers them as
-# `List[Dict[str, Sequence[Collection[str]]]]`.
-BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [
- {
- "rule_id": "global/content/.m.rule.contains_user_name",
- "conditions": [
- {
- "kind": "event_match",
- "key": "content.body",
- # Match the localpart of the requester's MXID.
- "pattern_type": "user_localpart",
- }
- ],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "default"},
- {"set_tweak": "highlight"},
- ],
- }
-]
-
-
-BASE_PREPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
- {
- "rule_id": "global/override/.m.rule.master",
- "enabled": False,
- "conditions": [],
- "actions": ["dont_notify"],
- }
-]
-
-
-BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
- {
- "rule_id": "global/override/.m.rule.suppress_notices",
- "conditions": [
- {
- "kind": "event_match",
- "key": "content.msgtype",
- "pattern": "m.notice",
- "_cache_key": "_suppress_notices",
- }
- ],
- "actions": ["dont_notify"],
- },
- # NB. .m.rule.invite_for_me must be higher prio than .m.rule.member_event
- # otherwise invites will be matched by .m.rule.member_event
- {
- "rule_id": "global/override/.m.rule.invite_for_me",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.member",
- "_cache_key": "_member",
- },
- {
- "kind": "event_match",
- "key": "content.membership",
- "pattern": "invite",
- "_cache_key": "_invite_member",
- },
- # Match the requester's MXID.
- {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
- ],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "default"},
- {"set_tweak": "highlight", "value": False},
- ],
- },
- # Will we sometimes want to know about people joining and leaving?
- # Perhaps: if so, this could be expanded upon. Seems the most usual case
- # is that we don't though. We add this override rule so that even if
- # the room rule is set to notify, we don't get notifications about
- # join/leave/avatar/displayname events.
- # See also: https://matrix.org/jira/browse/SYN-607
- {
- "rule_id": "global/override/.m.rule.member_event",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.member",
- "_cache_key": "_member",
- }
- ],
- "actions": ["dont_notify"],
- },
- # This was changed from underride to override so it's closer in priority
- # to the content rules where the user name highlight rule lives. This
- # way a room rule is lower priority than both but a custom override rule
- # is higher priority than both.
- {
- "rule_id": "global/override/.m.rule.contains_display_name",
- "conditions": [{"kind": "contains_display_name"}],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "default"},
- {"set_tweak": "highlight"},
- ],
- },
- {
- "rule_id": "global/override/.m.rule.roomnotif",
- "conditions": [
- {
- "kind": "event_match",
- "key": "content.body",
- "pattern": "@room",
- "_cache_key": "_roomnotif_content",
- },
- {
- "kind": "sender_notification_permission",
- "key": "room",
- "_cache_key": "_roomnotif_pl",
- },
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": True}],
- },
- {
- "rule_id": "global/override/.m.rule.tombstone",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.tombstone",
- "_cache_key": "_tombstone",
- },
- {
- "kind": "event_match",
- "key": "state_key",
- "pattern": "",
- "_cache_key": "_tombstone_statekey",
- },
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": True}],
- },
- {
- "rule_id": "global/override/.m.rule.reaction",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.reaction",
- "_cache_key": "_reaction",
- }
- ],
- "actions": ["dont_notify"],
- },
- # XXX: This is an experimental rule that is only enabled if msc3786_enabled
- # is enabled, if it is not the rule gets filtered out in _load_rules() in
- # PushRulesWorkerStore
- {
- "rule_id": "global/override/.org.matrix.msc3786.rule.room.server_acl",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.server_acl",
- "_cache_key": "_room_server_acl",
- },
- {
- "kind": "event_match",
- "key": "state_key",
- "pattern": "",
- "_cache_key": "_room_server_acl_state_key",
- },
- ],
- "actions": [],
- },
-]
-
-
-BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
- {
- "rule_id": "global/underride/.m.rule.call",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.call.invite",
- "_cache_key": "_call",
- }
- ],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "ring"},
- {"set_tweak": "highlight", "value": False},
- ],
- },
- # XXX: once m.direct is standardised everywhere, we should use it to detect
- # a DM from the user's perspective rather than this heuristic.
- {
- "rule_id": "global/underride/.m.rule.room_one_to_one",
- "conditions": [
- {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.message",
- "_cache_key": "_message",
- },
- ],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "default"},
- {"set_tweak": "highlight", "value": False},
- ],
- },
- # XXX: this is going to fire for events which aren't m.room.messages
- # but are encrypted (e.g. m.call.*)...
- {
- "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one",
- "conditions": [
- {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.encrypted",
- "_cache_key": "_encrypted",
- },
- ],
- "actions": [
- "notify",
- {"set_tweak": "sound", "value": "default"},
- {"set_tweak": "highlight", "value": False},
- ],
- },
- {
- "rule_id": "global/underride/.org.matrix.msc3772.thread_reply",
- "conditions": [
- {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.thread",
- # Match the requester's MXID.
- "sender_type": "user_id",
- }
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": False}],
- },
- {
- "rule_id": "global/underride/.m.rule.message",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.message",
- "_cache_key": "_message",
- }
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": False}],
- },
- # XXX: this is going to fire for events which aren't m.room.messages
- # but are encrypted (e.g. m.call.*)...
- {
- "rule_id": "global/underride/.m.rule.encrypted",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "m.room.encrypted",
- "_cache_key": "_encrypted",
- }
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": False}],
- },
- {
- "rule_id": "global/underride/.im.vector.jitsi",
- "conditions": [
- {
- "kind": "event_match",
- "key": "type",
- "pattern": "im.vector.modular.widgets",
- "_cache_key": "_type_modular_widgets",
- },
- {
- "kind": "event_match",
- "key": "content.type",
- "pattern": "jitsi",
- "_cache_key": "_content_type_jitsi",
- },
- {
- "kind": "event_match",
- "key": "state_key",
- "pattern": "*",
- "_cache_key": "_is_state_event",
- },
- ],
- "actions": ["notify", {"set_tweak": "highlight", "value": False}],
- },
-]
-
-
-BASE_RULE_IDS = set()
-
-for r in BASE_APPEND_CONTENT_RULES:
- r["priority_class"] = PRIORITY_CLASS_MAP["content"]
- r["default"] = True
- BASE_RULE_IDS.add(r["rule_id"])
-
-for r in BASE_PREPEND_OVERRIDE_RULES:
- r["priority_class"] = PRIORITY_CLASS_MAP["override"]
- r["default"] = True
- BASE_RULE_IDS.add(r["rule_id"])
-
-for r in BASE_APPEND_OVERRIDE_RULES:
- r["priority_class"] = PRIORITY_CLASS_MAP["override"]
- r["default"] = True
- BASE_RULE_IDS.add(r["rule_id"])
-
-for r in BASE_APPEND_UNDERRIDE_RULES:
- r["priority_class"] = PRIORITY_CLASS_MAP["underride"]
- r["default"] = True
- BASE_RULE_IDS.add(r["rule_id"])
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 713dcf6950..75b7e126ca 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -13,31 +13,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
import logging
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Collection,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Union,
+)
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership, RelationTypes
+from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.state import StateFilter
+from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
-from .push_rule_evaluator import PushRuleEvaluatorForEvent
-
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
-
push_rules_invalidation_counter = Counter(
"synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", ""
)
@@ -99,6 +106,8 @@ class BulkPushRuleEvaluator:
self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
+ self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
+
self.room_push_rule_cache_metrics = register_cache(
"cache",
"room_push_rule_cache",
@@ -106,13 +115,10 @@ class BulkPushRuleEvaluator:
resizable=False,
)
- # Whether to support MSC3772 is supported.
- self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
-
async def _get_rules_for_event(
self,
event: EventBase,
- ) -> Dict[str, List[Dict[str, Any]]]:
+ ) -> Dict[str, FilteredPushRules]:
"""Get the push rules for all users who may need to be notified about
the event.
@@ -160,23 +166,51 @@ class BulkPushRuleEvaluator:
return rules_by_user
async def _get_power_levels_and_sender_level(
- self, event: EventBase, context: EventContext
- ) -> Tuple[dict, int]:
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
+ ) -> Tuple[dict, Optional[int]]:
+ """
+ Given an event and an event context, get the power level event relevant to the event
+ and the power level of the sender of the event.
+ Args:
+ event: event to check
+ context: context of event to check
+ event_id_to_event: a mapping of event_id to event for a set of events being
+ batch persisted. This is needed as the sought-after power level event may
+ be in this batch rather than the DB.
+ """
+ # Outlier events have no power levels or sender level available.
+ if event.internal_metadata.is_outlier():
+ return {}, None
+
event_types = auth_types_for_event(event.room_version, event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types(event_types)
)
pl_event_id = prev_state_ids.get(POWER_KEY)
+ # fastpath: if there's a power level event, that's all we need, and
+ # not having a power level event is an extreme edge case
if pl_event_id:
- # fastpath: if there's a power level event, that's all we need, and
- # not having a power level event is an extreme edge case
- auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
+ # Get the power level event from the batch, or fall back to the database.
+ pl_event = event_id_to_event.get(pl_event_id)
+ if pl_event:
+ auth_events = {POWER_KEY: pl_event}
+ else:
+ auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
else:
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=False
)
auth_events_dict = await self.store.get_events(auth_events_ids)
+ # Some needed auth events might be in the batch; combine them with those
+ # fetched from the database.
+ for auth_event_id in auth_events_ids:
+ auth_event = event_id_to_event.get(auth_event_id)
+ if auth_event:
+ auth_events_dict[auth_event_id] = auth_event
auth_events = {(e.type, e.state_key): e for e in auth_events_dict.values()}
sender_level = get_user_power_level(event.sender, auth_events)
@@ -185,76 +219,91 @@ class BulkPushRuleEvaluator:
return pl_event.content if pl_event else {}, sender_level
- async def _get_mutual_relations(
- self, event: EventBase, rules: Iterable[Dict[str, Any]]
- ) -> Dict[str, Set[Tuple[str, str]]]:
- """
- Fetch event metadata for events which related to the same event as the given event.
-
- If the given event has no relation information, returns an empty dictionary.
-
- Args:
- event_id: The event ID which is targeted by relations.
- rules: The push rules which will be processed for this event.
+ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
+ """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation
Returns:
- A dictionary of relation type to:
- A set of tuples of:
- The sender
- The event type
+ Mapping of relation type to flattened events.
"""
+ related_events: Dict[str, Dict[str, str]] = {}
+ if self._related_event_match_enabled:
+ related_event_id = event.content.get("m.relates_to", {}).get("event_id")
+ relation_type = event.content.get("m.relates_to", {}).get("rel_type")
+ if related_event_id is not None and relation_type is not None:
+ related_event = await self.store.get_event(
+ related_event_id, allow_none=True
+ )
+ if related_event is not None:
+ related_events[relation_type] = _flatten_dict(related_event)
- # If the experimental feature is not enabled, skip fetching relations.
- if not self._relations_match_enabled:
- return {}
+ reply_event_id = (
+ event.content.get("m.relates_to", {})
+ .get("m.in_reply_to", {})
+ .get("event_id")
+ )
- # If the event does not have a relation, then cannot have any mutual
- # relations.
- relation = relation_from_event(event)
- if not relation:
- return {}
-
- # Pre-filter to figure out which relation types are interesting.
- rel_types = set()
- for rule in rules:
- # Skip disabled rules.
- if "enabled" in rule and not rule["enabled"]:
- continue
+ # convert replies to pseudo relations
+ if reply_event_id is not None:
+ related_event = await self.store.get_event(
+ reply_event_id, allow_none=True
+ )
- for condition in rule["conditions"]:
- if condition["kind"] != "org.matrix.msc3772.relation_match":
- continue
+ if related_event is not None:
+ related_events["m.in_reply_to"] = _flatten_dict(related_event)
- # rel_type is required.
- rel_type = condition.get("rel_type")
- if rel_type:
- rel_types.add(rel_type)
+ # indicate that this is from a fallback relation.
+ if relation_type == "m.thread" and event.content.get(
+ "m.relates_to", {}
+ ).get("is_falling_back", False):
+ related_events["m.in_reply_to"][
+ "im.vector.is_falling_back"
+ ] = ""
- # If no valid rules were found, no mutual relations.
- if not rel_types:
- return {}
+ return related_events
- # If any valid rules were found, fetch the mutual relations.
- return await self.store.get_mutual_event_relations(
- relation.parent_id, rel_types
- )
+ async def action_for_events_by_user(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
+ ) -> None:
+ """Given a list of events and their associated contexts, evaluate the push rules
+ for each event, check if the message should increment the unread count, and
+ insert the results into the event_push_actions_staging table.
+ """
+ # For batched events the power level events may not have been persisted yet,
+ # so we pass in the batched events. Thus if the event cannot be found in the
+ # database we can check in the batch.
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ for event, context in events_and_context:
+ await self._action_for_event_by_user(event, context, event_id_to_event)
@measure_func("action_for_event_by_user")
- async def action_for_event_by_user(
- self, event: EventBase, context: EventContext
+ async def _action_for_event_by_user(
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
) -> None:
- """Given an event and context, evaluate the push rules, check if the message
- should increment the unread count, and insert the results into the
- event_push_actions_staging table.
- """
- if event.internal_metadata.is_outlier():
- # This can happen due to out of band memberships
+
+ if (
+ not event.internal_metadata.is_notifiable()
+ or event.internal_metadata.is_historical()
+ ):
+ # Push rules for events that aren't notifiable can't be processed here, and
+ # we want to skip push notification actions for historical messages
+ # because we don't want to notify people about old history back in time.
+ # The historical messages also do not have the proper `context.current_state_ids`
+ # and `state_groups` because they have `prev_events` that aren't persisted yet
+ # (historical messages persisted in reverse-chronological order).
return
- count_as_unread = _should_count_as_unread(event, context)
+ # Disable counting as unread unless the experimental configuration is
+ # enabled, as it can cause additional (unwanted) rows to be added to the
+ # event_push_actions table.
+ count_as_unread = False
+ if self.hs.config.experimental.msc2654_enabled:
+ count_as_unread = _should_count_as_unread(event, context)
rules_by_user = await self._get_rules_for_event(event)
- actions_by_user: Dict[str, List[Union[dict, str]]] = {}
+ actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {}
room_member_count = await self.store.get_number_joined_users_in_room(
event.room_id
@@ -263,19 +312,39 @@ class BulkPushRuleEvaluator:
(
power_levels,
sender_power_level,
- ) = await self._get_power_levels_and_sender_level(event, context)
-
- relations = await self._get_mutual_relations(
- event, itertools.chain(*rules_by_user.values())
+ ) = await self._get_power_levels_and_sender_level(
+ event, context, event_id_to_event
)
- evaluator = PushRuleEvaluatorForEvent(
- event,
+ # Find the event's thread ID.
+ relation = relation_from_event(event)
+ # If the event does not have a relation, then it cannot have a thread ID.
+ thread_id = MAIN_TIMELINE
+ if relation:
+ # Recursively attempt to find the thread this event relates to.
+ if relation.rel_type == RelationTypes.THREAD:
+ thread_id = relation.parent_id
+ else:
+ # Since the event has not yet been persisted we check whether
+ # the parent is part of a thread.
+ thread_id = await self.store.get_thread_id(relation.parent_id)
+
+ related_events = await self._related_events(event)
+
+ # It's possible that old room versions have non-integer power levels (floats or
+ # strings). Workaround this by explicitly converting to int.
+ notification_levels = power_levels.get("notifications", {})
+ if not event.room_version.msc3667_int_only_power_levels:
+ for user_id, level in notification_levels.items():
+ notification_levels[user_id] = int(level)
+
+ evaluator = PushRuleEvaluator(
+ _flatten_dict(event),
room_member_count,
sender_power_level,
- power_levels,
- relations,
- self._relations_match_enabled,
+ notification_levels,
+ related_events,
+ self._related_event_match_enabled,
)
users = rules_by_user.keys()
@@ -283,20 +352,10 @@ class BulkPushRuleEvaluator:
event.room_id, users
)
- # This is a check for the case where user joins a room without being
- # allowed to see history, and then the server receives a delayed event
- # from before the user joined, which they should not be pushed for
- uids_with_visibility = await filter_event_for_clients_with_state(
- self.store, users, event, context
- )
-
for uid, rules in rules_by_user.items():
if event.sender == uid:
continue
- if uid not in uids_with_visibility:
- continue
-
display_name = None
profile = profiles.get(uid)
if profile:
@@ -317,19 +376,30 @@ class BulkPushRuleEvaluator:
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
- for rule in rules:
- if "enabled" in rule and not rule["enabled"]:
- continue
+ actions = evaluator.run(rules, uid, display_name)
+ if "notify" in actions:
+ # Push rules say we should notify the user of this event
+ actions_by_user[uid] = actions
- matches = evaluator.check_conditions(
- rule["conditions"], uid, display_name
- )
- if matches:
- actions = [x for x in rule["actions"] if x != "dont_notify"]
- if actions and "notify" in actions:
- # Push rules say we should notify the user of this event
- actions_by_user[uid] = actions
- break
+ # If there aren't any actions then we can skip the rest of the
+ # processing.
+ if not actions_by_user:
+ return
+
+ # This is a check for the case where user joins a room without being
+ # allowed to see history, and then the server receives a delayed event
+ # from before the user joined, which they should not be pushed for
+ #
+ # We do this *after* calculating the push actions as a) it's unlikely
+ # that we'll filter anyone out and b) for large rooms it's likely that
+ # most users will have push disabled and so the set of users to check is
+ # much smaller.
+ uids_with_visibility = await filter_event_for_clients_with_state(
+ self.store, actions_by_user.keys(), event, context
+ )
+
+ for user_id in set(actions_by_user).difference(uids_with_visibility):
+ actions_by_user.pop(user_id, None)
# Mark in the DB staging area the push actions for users who should be
# notified for this event. (This will then get handled when we persist
@@ -338,6 +408,7 @@ class BulkPushRuleEvaluator:
event.event_id,
actions_by_user,
count_as_unread,
+ thread_id,
)
@@ -345,3 +416,21 @@ MemberMap = Dict[str, Optional[EventIdMembership]]
Rule = Dict[str, dict]
RulesByUser = Dict[str, List[Rule]]
StateGroup = Union[object, int]
+
+
+def _flatten_dict(
+ d: Union[EventBase, Mapping[str, Any]],
+ prefix: Optional[List[str]] = None,
+ result: Optional[Dict[str, str]] = None,
+) -> Dict[str, str]:
+ if prefix is None:
+ prefix = []
+ if result is None:
+ result = {}
+ for key, value in d.items():
+ if isinstance(value, str):
+ result[".".join(prefix + [key])] = value.lower()
+ elif isinstance(value, Mapping):
+ _flatten_dict(value, prefix=(prefix + [key]), result=result)
+
+ return result
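
The behaviour of `_flatten_dict` as defined above, in isolation: nested mappings are flattened into dot-joined keys, string leaves are lowercased, and non-string leaves are dropped. For example:

event_dict = {"type": "m.room.message", "content": {"msgtype": "m.Text", "body": "Hi", "size": 3}}
_flatten_dict(event_dict)
# -> {"type": "m.room.message", "content.msgtype": "m.text", "content.body": "hi"}
# (the integer "size" leaf is neither a str nor a Mapping, so it is skipped)
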
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 5117ef6854..622a1e35c5 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -16,18 +16,16 @@ import copy
from typing import Any, Dict, List, Optional
from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
+from synapse.synapse_rust.push import FilteredPushRules, PushRule
from synapse.types import UserID
def format_push_rules_for_user(
- user: UserID, ruleslist: List
+ user: UserID, ruleslist: FilteredPushRules
) -> Dict[str, Dict[str, list]]:
"""Converts a list of rawrules and a enabled map into nested dictionaries
to match the Matrix client-server format for push rules"""
- # We're going to be mutating this a lot, so do a deep copy
- ruleslist = copy.deepcopy(ruleslist)
-
rules: Dict[str, Dict[str, List[Dict[str, Any]]]] = {
"global": {},
"device": {},
@@ -35,11 +33,36 @@ def format_push_rules_for_user(
rules["global"] = _add_empty_priority_class_arrays(rules["global"])
- for r in ruleslist:
- template_name = _priority_class_to_template_name(r["priority_class"])
+ for r, enabled in ruleslist.rules():
+ template_name = _priority_class_to_template_name(r.priority_class)
+
+ rulearray = rules["global"][template_name]
+
+ template_rule = _rule_to_template(r)
+ if not template_rule:
+ continue
+
+ rulearray.append(template_rule)
+
+ pattern_type = template_rule.pop("pattern_type", None)
+ if pattern_type == "user_id":
+ template_rule["pattern"] = user.to_string()
+ elif pattern_type == "user_localpart":
+ template_rule["pattern"] = user.localpart
+
+ template_rule["enabled"] = enabled
+
+ if "conditions" not in template_rule:
+ # Not all formatted rules have explicit conditions, e.g. "room"
+ # rules omit them as they can be derived from the kind and rule ID.
+ #
+ # If the formatted rule has no conditions then we can skip the
+ # formatting of conditions.
+ continue
# Remove internal stuff.
- for c in r["conditions"]:
+ template_rule["conditions"] = copy.deepcopy(template_rule["conditions"])
+ for c in template_rule["conditions"]:
c.pop("_cache_key", None)
pattern_type = c.pop("pattern_type", None)
@@ -52,16 +75,6 @@ def format_push_rules_for_user(
if sender_type == "user_id":
c["sender"] = user.to_string()
- rulearray = rules["global"][template_name]
-
- template_rule = _rule_to_template(r)
- if template_rule:
- if "enabled" in r:
- template_rule["enabled"] = r["enabled"]
- else:
- template_rule["enabled"] = True
- rulearray.append(template_rule)
-
return rules
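A brief sketch of the per-user pattern substitution performed in the loop above: default rules carry a `pattern_type` rather than a concrete `pattern`, and it is resolved to the requesting user's MXID or localpart at format time (the helper and rule below are illustrative, not part of the module).

from typing import Any, Dict

def resolve_pattern(template_rule: Dict[str, Any], user_id: str, localpart: str) -> Dict[str, Any]:
    # Mirrors the pattern_type handling in format_push_rules_for_user.
    pattern_type = template_rule.pop("pattern_type", None)
    if pattern_type == "user_id":
        template_rule["pattern"] = user_id
    elif pattern_type == "user_localpart":
        template_rule["pattern"] = localpart
    return template_rule

assert resolve_pattern(
    {"actions": ["notify"], "pattern_type": "user_localpart"}, "@bob:example.com", "bob"
) == {"actions": ["notify"], "pattern": "bob"}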
@@ -71,34 +84,36 @@ def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]:
return d
-def _rule_to_template(rule: Dict[str, Any]) -> Optional[Dict[str, Any]]:
- unscoped_rule_id = None
- if "rule_id" in rule:
- unscoped_rule_id = _rule_id_from_namespaced(rule["rule_id"])
+def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
+ templaterule: Dict[str, Any]
+
+ unscoped_rule_id = _rule_id_from_namespaced(rule.rule_id)
- template_name = _priority_class_to_template_name(rule["priority_class"])
+ template_name = _priority_class_to_template_name(rule.priority_class)
if template_name in ["override", "underride"]:
- templaterule = {k: rule[k] for k in ["conditions", "actions"]}
+ templaterule = {"conditions": rule.conditions, "actions": rule.actions}
elif template_name in ["sender", "room"]:
- templaterule = {"actions": rule["actions"]}
- unscoped_rule_id = rule["conditions"][0]["pattern"]
+ templaterule = {"actions": rule.actions}
+ unscoped_rule_id = rule.conditions[0]["pattern"]
elif template_name == "content":
- if len(rule["conditions"]) != 1:
+ if len(rule.conditions) != 1:
return None
- thecond = rule["conditions"][0]
- if "pattern" not in thecond:
+ thecond = rule.conditions[0]
+
+ templaterule = {"actions": rule.actions}
+ if "pattern" in thecond:
+ templaterule["pattern"] = thecond["pattern"]
+ elif "pattern_type" in thecond:
+ templaterule["pattern_type"] = thecond["pattern_type"]
+ else:
return None
- templaterule = {"actions": rule["actions"]}
- templaterule["pattern"] = thecond["pattern"]
else:
# This should not be reached unless this function is not kept in sync
# with PRIORITY_CLASS_INVERSE_MAP.
raise ValueError("Unexpected template_name: %s" % (template_name,))
- if unscoped_rule_id:
- templaterule["rule_id"] = unscoped_rule_id
- if "default" in rule:
- templaterule["default"] = rule["default"]
+ templaterule["rule_id"] = unscoped_rule_id
+ templaterule["default"] = rule.default
return templaterule
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index e96fb45e9f..b048b03a74 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
-from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
from prometheus_client import Counter
@@ -28,7 +28,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.storage.databases.main.event_push_actions import HttpPushAction
-from . import push_rule_evaluator, push_tools
+from . import push_tools
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -56,6 +56,39 @@ http_badges_failed_counter = Counter(
)
+def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
+ """
+ Converts a list of actions into a `tweaks` dict (which can then be passed to
+ the push gateway).
+
+ This function ignores all actions other than `set_tweak` actions, and treats
+ absent `value`s as `True`, which agrees with the only spec-defined treatment
+ of absent `value`s (namely, for `highlight` tweaks).
+
+ Args:
+ actions: list of actions
+ e.g. [
+ {"set_tweak": "a", "value": "AAA"},
+ {"set_tweak": "b", "value": "BBB"},
+ {"set_tweak": "highlight"},
+ "notify"
+ ]
+
+ Returns:
+ dictionary of tweaks for those actions
+ e.g. {"a": "AAA", "b": "BBB", "highlight": True}
+ """
+ tweaks = {}
+ for a in actions:
+ if not isinstance(a, dict):
+ continue
+ if "set_tweak" in a:
+ # value is allowed to be absent in which case the value assumed
+ # should be True.
+ tweaks[a["set_tweak"]] = a.get("value", True)
+ return tweaks
+
+
class HttpPusher(Pusher):
INITIAL_BACKOFF_SEC = 1 # in seconds because that's what Twisted takes
MAX_BACKOFF_SEC = 60 * 60
@@ -281,7 +314,7 @@ class HttpPusher(Pusher):
if "notify" not in push_action.actions:
return True
- tweaks = push_rule_evaluator.tweaks_for_actions(push_action.actions)
+ tweaks = tweaks_for_actions(push_action.actions)
badge = await push_tools.get_badge_count(
self.hs.get_datastores().main,
self.user_id,
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
deleted file mode 100644
index 2e8a017add..0000000000
--- a/synapse/push/push_rule_evaluator.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-from typing import Any, Dict, List, Mapping, Optional, Pattern, Set, Tuple, Union
-
-from matrix_common.regex import glob_to_regex, to_word_pattern
-
-from synapse.events import EventBase
-from synapse.types import UserID
-from synapse.util.caches.lrucache import LruCache
-
-logger = logging.getLogger(__name__)
-
-
-GLOB_REGEX = re.compile(r"\\\[(\\\!|)(.*)\\\]")
-IS_GLOB = re.compile(r"[\?\*\[\]]")
-INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
-
-
-def _room_member_count(
- ev: EventBase, condition: Dict[str, Any], room_member_count: int
-) -> bool:
- return _test_ineq_condition(condition, room_member_count)
-
-
-def _sender_notification_permission(
- ev: EventBase,
- condition: Dict[str, Any],
- sender_power_level: int,
- power_levels: Dict[str, Union[int, Dict[str, int]]],
-) -> bool:
- notif_level_key = condition.get("key")
- if notif_level_key is None:
- return False
-
- notif_levels = power_levels.get("notifications", {})
- assert isinstance(notif_levels, dict)
- room_notif_level = notif_levels.get(notif_level_key, 50)
-
- return sender_power_level >= room_notif_level
-
-
-def _test_ineq_condition(condition: Dict[str, Any], number: int) -> bool:
- if "is" not in condition:
- return False
- m = INEQUALITY_EXPR.match(condition["is"])
- if not m:
- return False
- ineq = m.group(1)
- rhs = m.group(2)
- if not rhs.isdigit():
- return False
- rhs_int = int(rhs)
-
- if ineq == "" or ineq == "==":
- return number == rhs_int
- elif ineq == "<":
- return number < rhs_int
- elif ineq == ">":
- return number > rhs_int
- elif ineq == ">=":
- return number >= rhs_int
- elif ineq == "<=":
- return number <= rhs_int
- else:
- return False
-
-
-def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
- """
- Converts a list of actions into a `tweaks` dict (which can then be passed to
- the push gateway).
-
- This function ignores all actions other than `set_tweak` actions, and treats
- absent `value`s as `True`, which agrees with the only spec-defined treatment
- of absent `value`s (namely, for `highlight` tweaks).
-
- Args:
- actions: list of actions
- e.g. [
- {"set_tweak": "a", "value": "AAA"},
- {"set_tweak": "b", "value": "BBB"},
- {"set_tweak": "highlight"},
- "notify"
- ]
-
- Returns:
- dictionary of tweaks for those actions
- e.g. {"a": "AAA", "b": "BBB", "highlight": True}
- """
- tweaks = {}
- for a in actions:
- if not isinstance(a, dict):
- continue
- if "set_tweak" in a:
- # value is allowed to be absent in which case the value assumed
- # should be True.
- tweaks[a["set_tweak"]] = a.get("value", True)
- return tweaks
-
-
-class PushRuleEvaluatorForEvent:
- def __init__(
- self,
- event: EventBase,
- room_member_count: int,
- sender_power_level: int,
- power_levels: Dict[str, Union[int, Dict[str, int]]],
- relations: Dict[str, Set[Tuple[str, str]]],
- relations_match_enabled: bool,
- ):
- self._event = event
- self._room_member_count = room_member_count
- self._sender_power_level = sender_power_level
- self._power_levels = power_levels
- self._relations = relations
- self._relations_match_enabled = relations_match_enabled
-
- # Maps strings of e.g. 'content.body' -> event["content"]["body"]
- self._value_cache = _flatten_dict(event)
-
- # Maps cache keys to final values.
- self._condition_cache: Dict[str, bool] = {}
-
- def check_conditions(
- self, conditions: List[dict], uid: str, display_name: Optional[str]
- ) -> bool:
- """
- Returns true if a user's conditions/user ID/display name match the event.
-
- Args:
- conditions: The user's conditions to match.
- uid: The user's MXID.
- display_name: The display name.
-
- Returns:
- True if all conditions match the event, False otherwise.
- """
- for cond in conditions:
- _cache_key = cond.get("_cache_key", None)
- if _cache_key:
- res = self._condition_cache.get(_cache_key, None)
- if res is False:
- return False
- elif res is True:
- continue
-
- res = self.matches(cond, uid, display_name)
- if _cache_key:
- self._condition_cache[_cache_key] = bool(res)
-
- if not res:
- return False
-
- return True
-
- def matches(
- self, condition: Dict[str, Any], user_id: str, display_name: Optional[str]
- ) -> bool:
- """
- Returns true if a user's condition/user ID/display name match the event.
-
- Args:
- condition: The user's condition to match.
- uid: The user's MXID.
- display_name: The display name, or None if there is not one.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- if condition["kind"] == "event_match":
- return self._event_match(condition, user_id)
- elif condition["kind"] == "contains_display_name":
- return self._contains_display_name(display_name)
- elif condition["kind"] == "room_member_count":
- return _room_member_count(self._event, condition, self._room_member_count)
- elif condition["kind"] == "sender_notification_permission":
- return _sender_notification_permission(
- self._event, condition, self._sender_power_level, self._power_levels
- )
- elif (
- condition["kind"] == "org.matrix.msc3772.relation_match"
- and self._relations_match_enabled
- ):
- return self._relation_match(condition, user_id)
- else:
- # XXX This looks incorrect -- we have reached an unknown condition
- # kind and are unconditionally returning that it matches. Note
- # that it seems possible to provide a condition to the /pushrules
- # endpoint with an unknown kind, see _rule_tuple_from_request_object.
- return True
-
- def _event_match(self, condition: dict, user_id: str) -> bool:
- """
- Check an "event_match" push rule condition.
-
- Args:
- condition: The "event_match" push rule condition to match.
- user_id: The user's MXID.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- pattern = condition.get("pattern", None)
-
- if not pattern:
- pattern_type = condition.get("pattern_type", None)
- if pattern_type == "user_id":
- pattern = user_id
- elif pattern_type == "user_localpart":
- pattern = UserID.from_string(user_id).localpart
-
- if not pattern:
- logger.warning("event_match condition with no pattern")
- return False
-
- # XXX: optimisation: cache our pattern regexps
- if condition["key"] == "content.body":
- body = self._event.content.get("body", None)
- if not body or not isinstance(body, str):
- return False
-
- return _glob_matches(pattern, body, word_boundary=True)
- else:
- haystack = self._value_cache.get(condition["key"], None)
- if haystack is None:
- return False
-
- return _glob_matches(pattern, haystack)
-
- def _contains_display_name(self, display_name: Optional[str]) -> bool:
- """
- Check an "event_match" push rule condition.
-
- Args:
- display_name: The display name, or None if there is not one.
-
- Returns:
- True if the display name is found in the event body, False otherwise.
- """
- if not display_name:
- return False
-
- body = self._event.content.get("body", None)
- if not body or not isinstance(body, str):
- return False
-
- # Similar to _glob_matches, but do not treat display_name as a glob.
- r = regex_cache.get((display_name, False, True), None)
- if not r:
- r1 = re.escape(display_name)
- r1 = to_word_pattern(r1)
- r = re.compile(r1, flags=re.IGNORECASE)
- regex_cache[(display_name, False, True)] = r
-
- return bool(r.search(body))
-
- def _relation_match(self, condition: dict, user_id: str) -> bool:
- """
- Check an "relation_match" push rule condition.
-
- Args:
- condition: The "event_match" push rule condition to match.
- user_id: The user's MXID.
-
- Returns:
- True if the condition matches the event, False otherwise.
- """
- rel_type = condition.get("rel_type")
- if not rel_type:
- logger.warning("relation_match condition missing rel_type")
- return False
-
- sender_pattern = condition.get("sender")
- if sender_pattern is None:
- sender_type = condition.get("sender_type")
- if sender_type == "user_id":
- sender_pattern = user_id
- type_pattern = condition.get("type")
-
- # If any other relations matches, return True.
- for sender, event_type in self._relations.get(rel_type, ()):
- if sender_pattern and not _glob_matches(sender_pattern, sender):
- continue
- if type_pattern and not _glob_matches(type_pattern, event_type):
- continue
- # All values must have matched.
- return True
-
- # No relations matched.
- return False
-
-
-# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
-regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
- 50000, "regex_push_cache"
-)
-
-
-def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
- """Tests if value matches glob.
-
- Args:
- glob
- value: String to test against glob.
- word_boundary: Whether to match against word boundaries or entire
- string. Defaults to False.
- """
-
- try:
- r = regex_cache.get((glob, True, word_boundary), None)
- if not r:
- r = glob_to_regex(glob, word_boundary=word_boundary)
- regex_cache[(glob, True, word_boundary)] = r
- return bool(r.search(value))
- except re.error:
- logger.warning("Failed to parse glob to regex: %r", glob)
- return False
-
-
-def _flatten_dict(
- d: Union[EventBase, Mapping[str, Any]],
- prefix: Optional[List[str]] = None,
- result: Optional[Dict[str, str]] = None,
-) -> Dict[str, str]:
- if prefix is None:
- prefix = []
- if result is None:
- result = {}
- for key, value in d.items():
- if isinstance(value, str):
- result[".".join(prefix + [key])] = value.lower()
- elif isinstance(value, Mapping):
- _flatten_dict(value, prefix=(prefix + [key]), result=result)
-
- return result
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 6661887d9f..edeba27a45 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -17,6 +17,7 @@ from synapse.events import EventBase
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
+from synapse.util.async_helpers import concurrently_execute
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
@@ -25,14 +26,25 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge = len(invites)
- for room_id in joins:
- notifs = await (
- store.get_unread_event_push_actions_by_room_for_user(
+ room_notifs = []
+
+ async def get_room_unread_count(room_id: str) -> None:
+ room_notifs.append(
+ await store.get_unread_event_push_actions_by_room_for_user(
room_id,
user_id,
)
)
- if notifs.notify_count == 0:
+
+ await concurrently_execute(get_room_unread_count, joins, 10)
+
+ for notifs in room_notifs:
+ # Combine the counts from all the threads.
+ notify_count = notifs.main_timeline.notify_count + sum(
+ n.notify_count for n in notifs.threads.values()
+ )
+
+ if notify_count == 0:
continue
if group_by_room:
@@ -40,7 +52,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge += 1
else:
# increment the badge count by the number of unread messages in the room
- badge += notifs.notify_count
+ badge += notify_count
return badge
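A minimal sketch of the new per-room count: the badge now sums the main timeline's `notify_count` with every thread's `notify_count` before deciding whether the room contributes (the dataclasses below are stand-ins for the real storage return types, not the actual classes).

from dataclasses import dataclass, field
from typing import Dict

@dataclass
class ThreadCounts:
    notify_count: int

@dataclass
class RoomNotifCounts:
    main_timeline: ThreadCounts
    threads: Dict[str, ThreadCounts] = field(default_factory=dict)

def room_notify_count(notifs: RoomNotifCounts) -> int:
    # Combine the counts from the main timeline and all threads.
    return notifs.main_timeline.notify_count + sum(
        n.notify_count for n in notifs.threads.values()
    )

example = RoomNotifCounts(ThreadCounts(2), {"$thread_root": ThreadCounts(3)})
assert room_notify_count(example) == 5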
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 1e0ef44fc7..e2648cbc93 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -94,7 +94,7 @@ class PusherPool:
return
run_as_background_process("start_pushers", self._start_pushers)
- async def add_pusher(
+ async def add_or_update_pusher(
self,
user_id: str,
access_token: Optional[int],
@@ -106,6 +106,8 @@ class PusherPool:
lang: Optional[str],
data: JsonDict,
profile_tag: str = "",
+ enabled: bool = True,
+ device_id: Optional[str] = None,
) -> Optional[Pusher]:
"""Creates a new pusher and adds it to the pool
@@ -147,9 +149,22 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
last_success=None,
failing_since=None,
+ enabled=enabled,
+ device_id=device_id,
)
)
+ # Before we actually persist the pusher, we check if the user already has one
+ # this app ID and pushkey. If so, we want to keep the access token and device ID
+ # in place, since this could be one device modifying (e.g. enabling/disabling)
+ # another device's pusher.
+ existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
+ user_id, app_id, pushkey
+ )
+ if existing_config:
+ access_token = existing_config.access_token
+ device_id = existing_config.device_id
+
await self.store.add_pusher(
user_id=user_id,
access_token=access_token,
@@ -163,8 +178,10 @@ class PusherPool:
data=data,
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
+ enabled=enabled,
+ device_id=device_id,
)
- pusher = await self.start_pusher_by_id(app_id, pushkey, user_id)
+ pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id)
return pusher
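A small sketch of the "keep existing credentials" step above, assuming a simplified stand-in for `PusherConfig`: when a pusher with the same app ID and pushkey already exists for the user, its access token and device ID win over the ones supplied with the update.

from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class ExistingPusher:
    access_token: Optional[int]
    device_id: Optional[str]

def merge_credentials(
    existing: Optional[ExistingPusher],
    access_token: Optional[int],
    device_id: Optional[str],
) -> Tuple[Optional[int], Optional[str]]:
    # One device may simply be enabling/disabling another device's pusher, so
    # prefer the credentials already stored for that pusher.
    if existing is not None:
        return existing.access_token, existing.device_id
    return access_token, device_id

assert merge_credentials(ExistingPusher(42, "FIRSTDEVICE"), 99, "OTHERDEVICE") == (42, "FIRSTDEVICE")
assert merge_credentials(None, 99, "OTHERDEVICE") == (99, "OTHERDEVICE")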
@@ -276,10 +293,25 @@ class PusherPool:
except Exception:
logger.exception("Exception in pusher on_new_receipts")
- async def start_pusher_by_id(
+ async def _get_pusher_config_for_user_by_app_id_and_pushkey(
+ self, user_id: str, app_id: str, pushkey: str
+ ) -> Optional[PusherConfig]:
+ resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+
+ pusher_config = None
+ for r in resultlist:
+ if r.user_name == user_id:
+ pusher_config = r
+
+ return pusher_config
+
+ async def process_pusher_change_by_id(
self, app_id: str, pushkey: str, user_id: str
) -> Optional[Pusher]:
- """Look up the details for the given pusher, and start it
+ """Look up the details for the given pusher, and either start it if its
+ "enabled" flag is True, or try to stop it otherwise.
+
+ If the pusher is new and its "enabled" flag is False, the stop is a noop.
Returns:
The pusher started, if any
@@ -290,12 +322,13 @@ class PusherPool:
if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
return None
- resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+ pusher_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
+ user_id, app_id, pushkey
+ )
- pusher_config = None
- for r in resultlist:
- if r.user_name == user_id:
- pusher_config = r
+ if pusher_config and not pusher_config.enabled:
+ self.maybe_stop_pusher(app_id, pushkey, user_id)
+ return None
pusher = None
if pusher_config:
@@ -305,7 +338,7 @@ class PusherPool:
async def _start_pushers(self) -> None:
"""Start all the pushers"""
- pushers = await self.store.get_all_pushers()
+ pushers = await self.store.get_enabled_pushers()
# Stagger starting up the pushers so we don't completely drown the
# process on start up.
@@ -363,6 +396,8 @@ class PusherPool:
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc()
+ logger.info("Starting pusher %s / %s", pusher.user_id, appid_pushkey)
+
# Check if there *may* be push to process. We do this as this check is a
# lot cheaper to do than actually fetching the exact rows we need to
# push.
@@ -382,16 +417,7 @@ class PusherPool:
return pusher
async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
- appid_pushkey = "%s:%s" % (app_id, pushkey)
-
- byuser = self.pushers.get(user_id, {})
-
- if appid_pushkey in byuser:
- logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
- pusher = byuser.pop(appid_pushkey)
- pusher.on_stop()
-
- synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
+ self.maybe_stop_pusher(app_id, pushkey, user_id)
# We can only delete pushers on master.
if self._remove_pusher_client:
@@ -402,3 +428,22 @@ class PusherPool:
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id, pushkey, user_id
)
+
+ def maybe_stop_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
+ """Stops a pusher with the given app ID and push key if one is running.
+
+ Args:
+ app_id: the pusher's app ID.
+ pushkey: the pusher's push key.
+ user_id: the user the pusher belongs to. Only used for logging.
+ """
+ appid_pushkey = "%s:%s" % (app_id, pushkey)
+
+ byuser = self.pushers.get(user_id, {})
+
+ if appid_pushkey in byuser:
+ logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
+ pusher = byuser.pop(appid_pushkey)
+ pusher.on_stop()
+
+ synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 53aa7fa4c6..ac9a92240a 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -25,6 +25,7 @@ from synapse.replication.http import (
push,
register,
send_event,
+ send_events,
state,
streams,
)
@@ -43,6 +44,7 @@ class ReplicationRestResource(JsonResource):
def register_servlets(self, hs: "HomeServer") -> None:
send_event.register_servlets(hs, self)
+ send_events.register_servlets(hs, self)
federation.register_servlets(hs, self)
presence.register_servlets(hs, self)
membership.register_servlets(hs, self)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 561ad5bf04..3f4d3fc51a 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -26,12 +26,13 @@ from twisted.web.server import Request
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.http import RequestTimedOutError
-from synapse.http.server import HttpServer, is_method_cancellable
+from synapse.http.server import HttpServer
from synapse.http.site import SynapseRequest
from synapse.logging import opentracing
from synapse.logging.opentracing import trace_with_opname
from synapse.types import JsonDict
from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.cancellation import is_function_cancellable
from synapse.util.stringutils import random_string
if TYPE_CHECKING:
@@ -152,7 +153,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
argument list.
Returns:
- dict: If POST/PUT request then dictionary must be JSON serialisable,
+ If POST/PUT request then dictionary must be JSON serialisable,
otherwise must be appropriate for adding as query args.
"""
return {}
@@ -183,8 +184,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
client = hs.get_simple_http_client()
local_instance_name = hs.get_instance_name()
+ # The values of these options should match the replication listener settings
master_host = hs.config.worker.worker_replication_host
master_port = hs.config.worker.worker_replication_http_port
+ master_tls = hs.config.worker.worker_replication_http_tls
instance_map = hs.config.worker.instance_map
@@ -204,9 +207,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
if instance_name == "master":
host = master_host
port = master_port
+ tls = master_tls
elif instance_name in instance_map:
host = instance_map[instance_name].host
port = instance_map[instance_name].port
+ tls = instance_map[instance_name].tls
else:
raise Exception(
"Instance %r not in 'instance_map' config" % (instance_name,)
@@ -237,7 +242,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
"Unknown METHOD on %s replication endpoint" % (cls.NAME,)
)
- uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+ # The scheme is hard-coded to "http" by default, or "https" if the target
+ # replication port is configured with TLS enabled.
+ scheme = "https" if tls else "http"
+ uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
+ scheme,
host,
port,
cls.NAME,
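A tiny sketch of the URI construction after this change, written as a standalone helper for illustration: the scheme flips to `https` only when the target replication listener is configured with TLS.

def replication_uri(tls: bool, host: str, port: int, endpoint: str, txn_id: str) -> str:
    scheme = "https" if tls else "http"
    return "%s://%s:%s/_synapse/replication/%s/%s" % (scheme, host, port, endpoint, txn_id)

assert replication_uri(False, "worker1", 9093, "send_event", "txn1") == (
    "http://worker1:9093/_synapse/replication/send_event/txn1"
)
assert replication_uri(True, "worker1", 9093, "send_event", "txn1").startswith("https://")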
@@ -311,7 +320,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
url_args = list(self.PATH_ARGS)
method = self.METHOD
- if self.CACHE and is_method_cancellable(self._handle_request):
+ if self.CACHE and is_function_cancellable(self._handle_request):
raise Exception(
f"{self.__class__.__name__} has been marked as cancellable, but CACHE "
"is set. The cancellable flag would have no effect."
@@ -359,6 +368,6 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
# The `@cancellable` decorator may be applied to `_handle_request`. But we
# told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
# so we have to set up the cancellable flag ourselves.
- request.is_render_cancellable = is_method_cancellable(self._handle_request)
+ request.is_render_cancellable = is_function_cancellable(self._handle_request)
return await self._handle_request(request, **kwargs)
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 3d63645726..7c4941c3d3 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -13,11 +13,12 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
from twisted.web.server import Request
from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@@ -62,7 +63,12 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.device_list_updater = hs.get_device_handler().device_list_updater
+ from synapse.handlers.device import DeviceHandler
+
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_list_updater = handler.device_list_updater
+
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
@@ -72,11 +78,77 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
async def _handle_request( # type: ignore[override]
self, request: Request, user_id: str
- ) -> Tuple[int, JsonDict]:
+ ) -> Tuple[int, Optional[JsonDict]]:
user_devices = await self.device_list_updater.user_device_resync(user_id)
return 200, user_devices
+class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
+ """Ask master to upload keys for the user and send them out over federation to
+ update other servers.
+
+ For now, only the master is permitted to handle key upload requests;
+ any worker can handle key query requests (since they're read-only).
+
+ Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
+ the main process to accomplish this.
+
+ Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
+ Request format (borrowed and expanded from KeyUploadServlet):
+
+ POST /_synapse/replication/upload_keys_for_user
+
+ {
+ "user_id": "<user_id>",
+ "device_id": "<device_id>",
+ "keys": {
+ ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+ }
+ }
+
+ Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet
+
+ """
+
+ NAME = "upload_keys_for_user"
+ PATH_ARGS = ()
+ CACHE = False
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ user_id: str, device_id: str, keys: JsonDict
+ ) -> JsonDict:
+
+ return {
+ "user_id": user_id,
+ "device_id": device_id,
+ "keys": keys,
+ }
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ content = parse_json_object_from_request(request)
+
+ user_id = content["user_id"]
+ device_id = content["device_id"]
+ keys = content["keys"]
+
+ results = await self.e2e_keys_handler.upload_keys_for_user(
+ user_id, device_id, keys
+ )
+
+ return 200, results
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
+ ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
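A hypothetical example of the replication payload a worker would send to the main process for the new `upload_keys_for_user` endpoint, following the request format documented above; the key material is placeholder data, not a real device's keys.

payload = {
    "user_id": "@alice:example.com",
    "device_id": "JLAFKJWSCS",
    "keys": {
        # Body shape as accepted by /_matrix/client/v3/keys/upload; the values
        # below are placeholders for illustration only.
        "device_keys": {
            "user_id": "@alice:example.com",
            "device_id": "JLAFKJWSCS",
            "algorithms": ["m.olm.v1.curve25519-aes-sha2", "m.megolm.v1.aes-sha2"],
            "keys": {
                "curve25519:JLAFKJWSCS": "<curve25519 key>",
                "ed25519:JLAFKJWSCS": "<ed25519 key>",
            },
            "signatures": {
                "@alice:example.com": {"ed25519:JLAFKJWSCS": "<signature>"}
            },
        },
    },
}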
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 6c8f8388fd..976c283360 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -39,6 +39,16 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
self.store = hs.get_datastores().main
self.registration_handler = hs.get_registration_handler()
+ # Default value if the worker that sent the replication request did not include
+ # an 'approved' property.
+ if (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ ):
+ self._approval_default = False
+ else:
+ self._approval_default = True
+
@staticmethod
async def _serialize_payload( # type: ignore[override]
user_id: str,
@@ -51,6 +61,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type: Optional[str],
address: Optional[str],
shadow_banned: bool,
+ approved: bool,
) -> JsonDict:
"""
Args:
@@ -68,6 +79,8 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
+ approved: Whether the user should be considered already approved by an
+ administrator.
"""
return {
"password_hash": password_hash,
@@ -79,6 +92,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
"user_type": user_type,
"address": address,
"shadow_banned": shadow_banned,
+ "approved": approved,
}
async def _handle_request( # type: ignore[override]
@@ -88,6 +102,12 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
await self.registration_handler.check_registration_ratelimit(content["address"])
+ # Always default admin users to approved (since it means they were created by
+ # an admin).
+ approved_default = self._approval_default
+ if content["admin"]:
+ approved_default = True
+
await self.registration_handler.register_with_store(
user_id=user_id,
password_hash=content["password_hash"],
@@ -99,6 +119,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type=content["user_type"],
address=content["address"],
shadow_banned=content["shadow_banned"],
+ approved=content.get("approved", approved_default),
)
return 200, {}
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 486f04723c..4215a1c1bc 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -141,8 +141,8 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)
- event = await self.event_creation_handler.persist_and_notify_client_event(
- requester, event, context, ratelimit=ratelimit, extra_users=extra_users
+ event = await self.event_creation_handler.persist_and_notify_client_events(
+ requester, [(event, context)], ratelimit=ratelimit, extra_users=extra_users
)
return (
diff --git a/synapse/replication/http/send_events.py b/synapse/replication/http/send_events.py
new file mode 100644
index 0000000000..8889bbb644
--- /dev/null
+++ b/synapse/replication/http/send_events.py
@@ -0,0 +1,171 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, List, Tuple
+
+from twisted.web.server import Request
+
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import EventBase, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
+from synapse.replication.http._base import ReplicationEndpoint
+from synapse.types import JsonDict, Requester, UserID
+from synapse.util.metrics import Measure
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+ from synapse.storage.databases.main import DataStore
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationSendEventsRestServlet(ReplicationEndpoint):
+ """Handles batches of newly created events on workers, including persisting and
+ notifying.
+
+ The API looks like:
+
+ POST /_synapse/replication/send_events/:txn_id
+
+ {
+ "events": [{
+ "event": { .. serialized event .. },
+ "room_version": .., // "1", "2", "3", etc: the version of the room
+ // containing the event
+ "event_format_version": .., // 1,2,3 etc: the event format version
+ "internal_metadata": { .. serialized internal_metadata .. },
+ "outlier": true|false,
+ "rejected_reason": .., // The event.rejected_reason field
+ "context": { .. serialized event context .. },
+ "requester": { .. serialized requester .. },
+ "ratelimit": true,
+ }]
+ }
+
+ 200 OK
+
+ { "stream_id": 12345, "event_id": "$abcdef..." }
+
+ Responds with a 409 when a `PartialStateConflictError` is raised due to an event
+ context that needs to be recomputed due to the un-partial stating of a room.
+
+ """
+
+ NAME = "send_events"
+ PATH_ARGS = ()
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.event_creation_handler = hs.get_event_creation_handler()
+ self.store = hs.get_datastores().main
+ self._storage_controllers = hs.get_storage_controllers()
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ events_and_context: List[Tuple[EventBase, EventContext]],
+ store: "DataStore",
+ requester: Requester,
+ ratelimit: bool,
+ extra_users: List[UserID],
+ ) -> JsonDict:
+ """
+ Args:
+ events_and_context
+ store
+ requester
+ ratelimit
+ extra_users
+ """
+ serialized_events = []
+
+ for event, context in events_and_context:
+ serialized_context = await context.serialize(event, store)
+ serialized_event = {
+ "event": event.get_pdu_json(),
+ "room_version": event.room_version.identifier,
+ "event_format_version": event.format_version,
+ "internal_metadata": event.internal_metadata.get_dict(),
+ "outlier": event.internal_metadata.is_outlier(),
+ "rejected_reason": event.rejected_reason,
+ "context": serialized_context,
+ "requester": requester.serialize(),
+ "ratelimit": ratelimit,
+ "extra_users": [u.to_string() for u in extra_users],
+ }
+ serialized_events.append(serialized_event)
+
+ payload = {"events": serialized_events}
+
+ return payload
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ with Measure(self.clock, "repl_send_events_parse"):
+ payload = parse_json_object_from_request(request)
+ events_and_context = []
+ events = payload["events"]
+
+ for event_payload in events:
+ event_dict = event_payload["event"]
+ room_ver = KNOWN_ROOM_VERSIONS[event_payload["room_version"]]
+ internal_metadata = event_payload["internal_metadata"]
+ rejected_reason = event_payload["rejected_reason"]
+
+ event = make_event_from_dict(
+ event_dict, room_ver, internal_metadata, rejected_reason
+ )
+ event.internal_metadata.outlier = event_payload["outlier"]
+
+ requester = Requester.deserialize(
+ self.store, event_payload["requester"]
+ )
+ context = EventContext.deserialize(
+ self._storage_controllers, event_payload["context"]
+ )
+
+ ratelimit = event_payload["ratelimit"]
+ events_and_context.append((event, context))
+
+ extra_users = [
+ UserID.from_string(u) for u in event_payload["extra_users"]
+ ]
+
+ logger.info(
+ "Got batch of events to send, last ID of batch is: %s, sending into room: %s",
+ event.event_id,
+ event.room_id,
+ )
+
+ last_event = (
+ await self.event_creation_handler.persist_and_notify_client_events(
+ requester, events_and_context, ratelimit, extra_users
+ )
+ )
+
+ return (
+ 200,
+ {
+ "stream_id": last_event.internal_metadata.stream_ordering,
+ "event_id": last_event.event_id,
+ },
+ )
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ ReplicationSendEventsRestServlet(hs).register(http_server)
diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py
deleted file mode 100644
index f43a360a80..0000000000
--- a/synapse/replication/slave/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py
deleted file mode 100644
index 8f3f953ed4..0000000000
--- a/synapse/replication/slave/storage/_slaved_id_tracker.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Tuple
-
-from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
-
-
-class SlavedIdTracker(AbstractStreamIdTracker):
- """Tracks the "current" stream ID of a stream with a single writer.
-
- See `AbstractStreamIdTracker` for more details.
-
- Note that this class does not work correctly when there are multiple
- writers.
- """
-
- def __init__(
- self,
- db_conn: LoggingDatabaseConnection,
- table: str,
- column: str,
- extra_tables: Optional[List[Tuple[str, str]]] = None,
- step: int = 1,
- ):
- self.step = step
- self._current = _load_current_id(db_conn, table, column, step)
- if extra_tables:
- for table, column in extra_tables:
- self.advance(None, _load_current_id(db_conn, table, column))
-
- def advance(self, instance_name: Optional[str], new_id: int) -> None:
- self._current = (max if self.step > 0 else min)(self._current, new_id)
-
- def get_current_token(self) -> int:
- return self._current
-
- def get_current_token_for_writer(self, instance_name: str) -> int:
- return self.get_current_token()
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
deleted file mode 100644
index 6fcade510a..0000000000
--- a/synapse/replication/slave/storage/devices.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
-from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.devices import DeviceWorkerStore
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedDeviceStore(DeviceWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- self.hs = hs
-
- self._device_list_id_gen = SlavedIdTracker(
- db_conn,
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ],
- )
-
- super().__init__(database, db_conn, hs)
-
- def get_device_stream_token(self) -> int:
- return self._device_list_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == DeviceListsStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- self._invalidate_caches_for_devices(token, rows)
- elif stream_name == UserSignatureStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- for row in rows:
- self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
-
- def _invalidate_caches_for_devices(
- self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
- ) -> None:
- for row in rows:
- # The entities are either user IDs (starting with '@') whose devices
- # have changed, or remote servers that we need to tell about
- # changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
- )
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
deleted file mode 100644
index fe47778cb1..0000000000
--- a/synapse/replication/slave/storage/events.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-from typing import TYPE_CHECKING
-
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
-from synapse.storage.databases.main.event_push_actions import (
- EventPushActionsWorkerStore,
-)
-from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.databases.main.relations import RelationsWorkerStore
-from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.databases.main.state import StateGroupWorkerStore
-from synapse.storage.databases.main.stream import StreamWorkerStore
-from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# So, um, we want to borrow a load of functions intended for reading from
-# a DataStore, but we don't want to take functions that either write to the
-# DataStore or are cached and don't have cache invalidation logic.
-#
-# Rather than write duplicate versions of those functions, or lift them to
-# a common base class, we going to grab the underlying __func__ object from
-# the method descriptor on the DataStore and chuck them into our class.
-
-
-class SlavedEventStore(
- EventFederationWorkerStore,
- RoomMemberWorkerStore,
- EventPushActionsWorkerStore,
- StreamWorkerStore,
- StateGroupWorkerStore,
- SignatureWorkerStore,
- EventsWorkerStore,
- UserErasureWorkerStore,
- RelationsWorkerStore,
-):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
-
- events_max = self._stream_id_gen.get_current_token()
- curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
- db_conn,
- "current_state_delta_stream",
- entity_column="room_id",
- stream_column="stream_id",
- max_value=events_max, # As we share the stream id with events token
- limit=1000,
- )
- self._curr_state_delta_stream_cache = StreamChangeCache(
- "_curr_state_delta_stream_cache",
- min_curr_state_delta_id,
- prefilled_cache=curr_state_delta_prefill,
- )
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
deleted file mode 100644
index a00b38c512..0000000000
--- a/synapse/replication/slave/storage/keys.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from synapse.storage.databases.main.keys import KeyStore
-
-# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
-# the races it creates aren't too bad.
-
-SlavedKeyStore = KeyStore
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
deleted file mode 100644
index 52ee3f7e58..0000000000
--- a/synapse/replication/slave/storage/push_rule.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Iterable
-
-from synapse.replication.tcp.streams import PushRulesStream
-from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
-
-from .events import SlavedEventStore
-
-
-class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
- def get_max_push_rules_stream_id(self) -> int:
- return self._push_rules_stream_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushRulesStream.NAME:
- self._push_rules_stream_id_gen.advance(instance_name, token)
- for row in rows:
- self.get_push_rules_for_user.invalidate((row.user_id,))
- self.get_push_rules_enabled_for_user.invalidate((row.user_id,))
- self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
deleted file mode 100644
index 44ed20e424..0000000000
--- a/synapse/replication/slave/storage/pushers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.pusher import PusherWorkerStore
-
-from ._slaved_id_tracker import SlavedIdTracker
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedPusherStore(PusherWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
- self._pushers_id_gen = SlavedIdTracker( # type: ignore
- db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
- )
-
- def get_pushers_stream_token(self) -> int:
- return self._pushers_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushersStream.NAME:
- self._pushers_id_gen.advance(instance_name, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index e4f2201c92..18252a2958 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -189,7 +189,9 @@ class ReplicationDataHandler:
if row.deleted:
self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
- await self.start_pusher(row.user_id, row.app_id, row.pushkey)
+ await self.process_pusher_change(
+ row.user_id, row.app_id, row.pushkey
+ )
elif stream_name == EventsStream.NAME:
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
@@ -208,15 +210,16 @@ class ReplicationDataHandler:
max_token = self.store.get_room_max_token()
event_pos = PersistedEventPosition(instance_name, token)
- await self.notifier.on_new_room_event_args(
- event_pos=event_pos,
- max_room_stream_token=max_token,
- extra_users=extra_users,
- room_id=row.data.room_id,
- event_id=row.data.event_id,
- event_type=row.data.type,
- state_key=row.data.state_key,
- membership=row.data.membership,
+ event_entry = self.notifier.create_pending_room_event_entry(
+ event_pos,
+ extra_users,
+ row.data.room_id,
+ row.data.type,
+ row.data.state_key,
+ row.data.membership,
+ )
+ await self.notifier.notify_new_room_events(
+ [(event_entry, row.data.event_id)], max_token
)
# If this event is a join, make a note of it so we have an accurate
@@ -334,13 +337,15 @@ class ReplicationDataHandler:
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
- async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
+ async def process_pusher_change(
+ self, user_id: str, app_id: str, pushkey: str
+ ) -> None:
if not self._notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
- await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+ await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id)
class FederationSenderHandler:
@@ -423,7 +428,8 @@ class FederationSenderHandler:
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
- receipt.data,
+ thread_id=receipt.thread_id,
+ data=receipt.data,
)
await self.federation_sender.send_read_receipt(receipt_info)
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index e1cbfa50eb..0f166d16aa 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -35,7 +35,6 @@ from twisted.internet.protocol import ReconnectingClientFactory
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.tcp.client import DirectTcpReplicationClientFactory
from synapse.replication.tcp.commands import (
ClearUserSyncsCommand,
Command,
@@ -332,46 +331,31 @@ class ReplicationCommandHandler:
def start_replication(self, hs: "HomeServer") -> None:
"""Helper method to start replication."""
- if hs.config.redis.redis_enabled:
- from synapse.replication.tcp.redis import (
- RedisDirectTcpReplicationClientFactory,
- )
+ from synapse.replication.tcp.redis import RedisDirectTcpReplicationClientFactory
- # First let's ensure that we have a ReplicationStreamer started.
- hs.get_replication_streamer()
+ # First let's ensure that we have a ReplicationStreamer started.
+ hs.get_replication_streamer()
- # We need two connections to redis, one for the subscription stream and
- # one to send commands to (as you can't send further redis commands to a
- # connection after SUBSCRIBE is called).
+ # We need two connections to redis, one for the subscription stream and
+ # one to send commands to (as you can't send further redis commands to a
+ # connection after SUBSCRIBE is called).
- # First create the connection for sending commands.
- outbound_redis_connection = hs.get_outbound_redis_connection()
+ # First create the connection for sending commands.
+ outbound_redis_connection = hs.get_outbound_redis_connection()
- # Now create the factory/connection for the subscription stream.
- self._factory = RedisDirectTcpReplicationClientFactory(
- hs,
- outbound_redis_connection,
- channel_names=self._channels_to_subscribe_to,
- )
- hs.get_reactor().connectTCP(
- hs.config.redis.redis_host,
- hs.config.redis.redis_port,
- self._factory,
- timeout=30,
- bindAddress=None,
- )
- else:
- client_name = hs.get_instance_name()
- self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
- host = hs.config.worker.worker_replication_host
- port = hs.config.worker.worker_replication_port
- hs.get_reactor().connectTCP(
- host,
- port,
- self._factory,
- timeout=30,
- bindAddress=None,
- )
+ # Now create the factory/connection for the subscription stream.
+ self._factory = RedisDirectTcpReplicationClientFactory(
+ hs,
+ outbound_redis_connection,
+ channel_names=self._channels_to_subscribe_to,
+ )
+ hs.get_reactor().connectTCP(
+ hs.config.redis.redis_host,
+ hs.config.redis.redis_port,
+ self._factory,
+ timeout=30,
+ bindAddress=None,
+ )
def get_streams(self) -> Dict[str, Stream]:
"""Get a map from stream name to all streams."""
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 7763ffb2d0..56a5c21910 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
self._parse_and_dispatch_line(line)
def _parse_and_dispatch_line(self, line: bytes) -> None:
- if line.strip() == "":
+ if line.strip() == b"":
# Ignore blank lines
return
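
The one-character fix above is easy to miss: `_parse_and_dispatch_line` is annotated as taking `line: bytes`, and in Python 3 a bytes value never compares equal to a str literal, so the old blank-line guard was always False and blank lines fell through to the command parser. A minimal sketch of the difference:

    line = b"   \r\n"

    # Comparing bytes to a str literal is always False in Python 3, so the
    # old check `line.strip() == ""` never detected blank lines.
    assert (line.strip() == "") is False

    # Comparing against a bytes literal behaves as intended.
    assert line.strip() == b""
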
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 398bebeaa6..e01155ad59 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -361,6 +361,7 @@ class ReceiptsStream(Stream):
receipt_type: str
user_id: str
event_id: str
+ thread_id: Optional[str]
data: dict
NAME = "receipts"
diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html
new file mode 100644
index 0000000000..46439fce6a
--- /dev/null
+++ b/synapse/res/templates/_base.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta charset="UTF-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>{% block title %}{% endblock %}</title>
+ <style type="text/css">
+ {%- include 'style.css' without context %}
+ </style>
+ {% block header %}{% endblock %}
+</head>
+<body>
+<header class="mx_Header">
+ {% if app_name == "Riot" %}
+ <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {% elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {% elif app_name == "Element" %}
+ <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
+ {% else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {% endif %}
+</header>
+
+{% block body %}{% endblock %}
+
+</body>
+</html>
diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html
index b751359bdf..91582a8af0 100644
--- a/synapse/res/templates/account_previously_renewed.html
+++ b/synapse/res/templates/account_previously_renewed.html
@@ -1 +1,6 @@
-<html><body>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</body><html>
+{% extends "_base.html" %}
+{% block title %}Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+<p>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html
index e8c0f52f05..18a57833f1 100644
--- a/synapse/res/templates/account_renewed.html
+++ b/synapse/res/templates/account_renewed.html
@@ -1 +1,6 @@
-<html><body>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</body><html>
+{% extends "_base.html" %}
+{% block title %}Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+<p>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
index cc4ab07e09..33c883936a 100644
--- a/synapse/res/templates/add_threepid.html
+++ b/synapse/res/templates/add_threepid.html
@@ -1,9 +1,8 @@
-<html>
-<body>
- <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
+{% extends "_base.html" %}
+{% block title %}Request to add an email address to your Matrix account{% endblock %}
- <a href="{{ link }}">{{ link }}</a>
-
- <p>If this was not you, you can safely ignore this email. Thank you.</p>
-</body>
-</html>
+{% block body %}
+<p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
+<a href="{{ link }}">{{ link }}</a>
+<p>If this was not you, you can safely ignore this email. Thank you.</p>
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid_failure.html b/synapse/res/templates/add_threepid_failure.html
index 441d11c846..f6d7e33825 100644
--- a/synapse/res/templates/add_threepid_failure.html
+++ b/synapse/res/templates/add_threepid_failure.html
@@ -1,8 +1,7 @@
-<html>
-<head></head>
-<body>
-<p>The request failed for the following reason: {{ failure_reason }}.</p>
+{% extends "_base.html" %}
+{% block title %}Request failed{% endblock %}
+{% block body %}
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
<p>No changes have been made to your account.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid_success.html b/synapse/res/templates/add_threepid_success.html
index fbd6e4018f..6d45111796 100644
--- a/synapse/res/templates/add_threepid_success.html
+++ b/synapse/res/templates/add_threepid_success.html
@@ -1,6 +1,6 @@
-<html>
-<head></head>
-<body>
+{% extends "_base.html" %}
+{% block title %}Your email has now been validated{% endblock %}
+
+{% block body %}
<p>Your email has now been validated, please return to your client. You may now close this window.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/auth_success.html b/synapse/res/templates/auth_success.html
index baf4633142..9178332f59 100644
--- a/synapse/res/templates/auth_success.html
+++ b/synapse/res/templates/auth_success.html
@@ -1,21 +1,21 @@
-<html>
-<head>
-<title>Success!</title>
-<meta name='viewport' content='width=device-width, initial-scale=1,
- user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+{% extends "_base.html" %}
+{% block title %}Success!{% endblock %}
+
+{% block header %}
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
<script>
if (window.onAuthDone) {
window.onAuthDone();
} else if (window.opener && window.opener.postMessage) {
- window.opener.postMessage("authDone", "*");
+ window.opener.postMessage("authDone", "*");
}
</script>
-</head>
-<body>
- <div>
- <p>Thank you</p>
- <p>You may now close this window and return to the application</p>
- </div>
-</body>
-</html>
+{% endblock %}
+
+{% block body %}
+<div>
+ <p>Thank you</p>
+ <p>You may now close this window and return to the application</p>
+</div>
+
+{% endblock %}
diff --git a/synapse/res/templates/invalid_token.html b/synapse/res/templates/invalid_token.html
index 6bd2b98364..b19e3023a1 100644
--- a/synapse/res/templates/invalid_token.html
+++ b/synapse/res/templates/invalid_token.html
@@ -1 +1,6 @@
-<html><body>Invalid renewal token.</body><html>
+{% extends "_base.html" %}
+{% block title %}Invalid renewal token.{% endblock %}
+
+{% block body %}
+<p>Invalid renewal token.</p>
+{% endblock %}
diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html
index d87311f659..406397aaca 100644
--- a/synapse/res/templates/notice_expiry.html
+++ b/synapse/res/templates/notice_expiry.html
@@ -1,45 +1,46 @@
-<!doctype html>
-<html lang="en">
- <head>
- <style type="text/css">
- {% include 'mail.css' without context %}
- {% include "mail-%s.css" % app_name ignore missing without context %}
- {% include 'mail-expiry.css' without context %}
- </style>
- </head>
- <body>
- <table id="page">
- <tr>
- <td> </td>
- <td id="inner">
- <table class="header">
- <tr>
- <td>
- <div class="salutation">Hi {{ display_name }},</div>
- </td>
- <td class="logo">
- {% if app_name == "Riot" %}
- <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
- {% elif app_name == "Vector" %}
- <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
- {% elif app_name == "Element" %}
- <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
- {% else %}
- <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
- {% endif %}
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="noticetext">Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.</div>
- <div class="noticetext">To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):</div>
- <div class="noticetext"><a href="{{ url }}">{{ url }}</a></div>
- </td>
- </tr>
- </table>
- </td>
- <td> </td>
- </tr>
- </table>
- </body>
-</html>
+{% extends "_base.html" %}
+{% block title %}Notice of expiry{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {% include 'mail.css' without context %}
+ {% include "mail-%s.css" % app_name ignore missing without context %}
+ {% include 'mail-expiry.css' without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<table id="page">
+ <tr>
+ <td> </td>
+ <td id="inner">
+ <table class="header">
+ <tr>
+ <td>
+ <div class="salutation">Hi {{ display_name }},</div>
+ </td>
+ <td class="logo">
+ {% if app_name == "Riot" %}
+ <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {% elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {% elif app_name == "Element" %}
+ <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
+ {% else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {% endif %}
+ </td>
+ </tr>
+ <tr>
+ <td colspan="2">
+ <div class="noticetext">Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.</div>
+ <div class="noticetext">To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):</div>
+ <div class="noticetext"><a href="{{ url }}">{{ url }}</a></div>
+ </td>
+ </tr>
+ </table>
+ </td>
+ <td> </td>
+ </tr>
+</table>
+{% endblock %}
diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html
index 27d4182790..2add9dd859 100644
--- a/synapse/res/templates/notif_mail.html
+++ b/synapse/res/templates/notif_mail.html
@@ -1,57 +1,59 @@
-<!doctype html>
-<html lang="en">
- <head>
- <style type="text/css">
- {%- include 'mail.css' without context %}
- {%- include "mail-%s.css" % app_name ignore missing without context %}
- </style>
- </head>
- <body>
- <table id="page">
- <tr>
- <td> </td>
- <td id="inner">
- <table class="header">
- <tr>
- <td>
- <div class="salutation">Hi {{ user_display_name }},</div>
- <div class="summarytext">{{ summary_text }}</div>
- </td>
- <td class="logo">
- {%- if app_name == "Riot" %}
- <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
- {%- elif app_name == "Vector" %}
- <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
- {%- elif app_name == "Element" %}
- <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
- {%- else %}
- <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
- {%- endif %}
- </td>
- </tr>
- </table>
- {%- for room in rooms %}
- {%- include 'room.html' with context %}
- {%- endfor %}
- <div class="footer">
- <a href="{{ unsubscribe_link }}">Unsubscribe</a>
- <br/>
- <br/>
- <div class="debug">
- Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
- an event was received at {{ reason.received_at|format_ts("%c") }}
- which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
- {%- if reason.last_sent_ts %}
- and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
- which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
- {%- else %}
- and we don't have a last time we sent a mail for this room.
- {%- endif %}
- </div>
- </div>
- </td>
- <td> </td>
- </tr>
- </table>
- </body>
-</html>
+{% extends "_base.html" %}
+
+{% block title %}New activity in room{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {%- include 'mail.css' without context %}
+ {%- include "mail-%s.css" % app_name ignore missing without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<table id="page">
+ <tr>
+ <td> </td>
+ <td id="inner">
+ <table class="header">
+ <tr>
+ <td>
+ <div class="salutation">Hi {{ user_display_name }},</div>
+ <div class="summarytext">{{ summary_text }}</div>
+ </td>
+ <td class="logo">
+ {%- if app_name == "Riot" %}
+ <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {%- elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {%- elif app_name == "Element" %}
+ <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
+ {%- else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {%- endif %}
+ </td>
+ </tr>
+ </table>
+ {%- for room in rooms %}
+ {%- include 'room.html' with context %}
+ {%- endfor %}
+ <div class="footer">
+ <a href="{{ unsubscribe_link }}">Unsubscribe</a>
+ <br/>
+ <br/>
+ <div class="debug">
+ Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
+ an event was received at {{ reason.received_at|format_ts("%c") }}
+ which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
+ {%- if reason.last_sent_ts %}
+ and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
+ which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
+ {%- else %}
+ and we don't have a last time we sent a mail for this room.
+ {%- endif %}
+ </div>
+ </div>
+ </td>
+ <td> </td>
+ </tr>
+</table>
+{% endblock %}
diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html
index a197bf872c..1f267946c8 100644
--- a/synapse/res/templates/password_reset.html
+++ b/synapse/res/templates/password_reset.html
@@ -1,9 +1,10 @@
-<html>
-<body>
- <p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
+{% extends "_base.html" %}
+{% block title %}Password reset{% endblock %}
- <a href="{{ link }}">{{ link }}</a>
+{% block body %}
+<p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
- <p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. Thank you.</p>
-</body>
-</html>
+<a href="{{ link }}">{{ link }}</a>
+
+<p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. Thank you.</p>
+{% endblock %}
diff --git a/synapse/res/templates/password_reset_confirmation.html b/synapse/res/templates/password_reset_confirmation.html
index def4b5162b..fabb9a6ed5 100644
--- a/synapse/res/templates/password_reset_confirmation.html
+++ b/synapse/res/templates/password_reset_confirmation.html
@@ -1,6 +1,7 @@
-<html>
-<head></head>
-<body>
+{% extends "_base.html" %}
+{% block title %}Password reset confirmation{% endblock %}
+
+{% block body %}
<!--Use a hidden form to resubmit the information necessary to reset the password-->
<form method="post">
<input type="hidden" name="sid" value="{{ sid }}">
@@ -11,6 +12,4 @@
If you did not mean to do this, please close this page and your password will not be changed.</p>
<p><button type="submit">Confirm changing my password</button></p>
</form>
-</body>
-</html>
-
+{% endblock %}
diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html
index 9e3c4446e3..9990e860f9 100644
--- a/synapse/res/templates/password_reset_failure.html
+++ b/synapse/res/templates/password_reset_failure.html
@@ -1,8 +1,7 @@
-<html>
-<head></head>
-<body>
-<p>The request failed for the following reason: {{ failure_reason }}.</p>
+{% extends "_base.html" %}
+{% block title %}Password reset failure{% endblock %}
+{% block body %}
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
<p>Your password has not been reset.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html
index 7324d66d1e..edada513ab 100644
--- a/synapse/res/templates/password_reset_success.html
+++ b/synapse/res/templates/password_reset_success.html
@@ -1,6 +1,6 @@
-<html>
-<head></head>
-<body>
+{% extends "_base.html" %}
+{% block title %}Password reset success{% endblock %}
+
+{% block body %}
<p>Your email has now been validated, please return to your client to reset your password. You may now close this window.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html
index b3db06ef97..8204928cdf 100644
--- a/synapse/res/templates/recaptcha.html
+++ b/synapse/res/templates/recaptcha.html
@@ -1,10 +1,8 @@
-<html>
-<head>
-<title>Authentication</title>
-<meta name='viewport' content='width=device-width, initial-scale=1,
- user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
-<script src="https://www.recaptcha.net/recaptcha/api.js"
- async defer></script>
+{% extends "_base.html" %}
+{% block title %}Authentication{% endblock %}
+
+{% block header %}
+<script src="https://www.recaptcha.net/recaptcha/api.js" async defer></script>
<script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
<script>
@@ -12,8 +10,9 @@ function captchaDone() {
$('#registrationForm').submit();
}
</script>
-</head>
-<body>
+{% endblock %}
+
+{% block body %}
<form id="registrationForm" method="post" action="{{ myurl }}">
<div>
{% if error is defined %}
@@ -37,5 +36,4 @@ function captchaDone() {
</div>
</div>
</form>
-</body>
-</html>
+{% endblock %}
\ No newline at end of file
diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html
index 16730a527f..cdb815665e 100644
--- a/synapse/res/templates/registration.html
+++ b/synapse/res/templates/registration.html
@@ -1,11 +1,12 @@
-<html>
-<body>
- <p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p>
+{% extends "_base.html" %}
+{% block title %}Registration{% endblock %}
- <a href="{{ link }}">Verify Your Email Address</a>
+{% block body %}
+<p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p>
- <p>If this was not you, you can safely disregard this email.</p>
+<a href="{{ link }}">Verify Your Email Address</a>
- <p>Thank you.</p>
-</body>
-</html>
+<p>If this was not you, you can safely disregard this email.</p>
+
+<p>Thank you.</p>
+{% endblock %}
diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html
index 2833d79c37..ae2a9cae2c 100644
--- a/synapse/res/templates/registration_failure.html
+++ b/synapse/res/templates/registration_failure.html
@@ -1,6 +1,6 @@
-<html>
-<head></head>
-<body>
+{% extends "_base.html" %}
+{% block title %}Registration failure{% endblock %}
+
+{% block body %}
<p>Validation failed for the following reason: {{ failure_reason }}.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html
index fbd6e4018f..6d45111796 100644
--- a/synapse/res/templates/registration_success.html
+++ b/synapse/res/templates/registration_success.html
@@ -1,6 +1,6 @@
-<html>
-<head></head>
-<body>
+{% extends "_base.html" %}
+{% block title %}Your email has now been validated{% endblock %}
+
+{% block body %}
<p>Your email has now been validated, please return to your client. You may now close this window.</p>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html
index 4577ce1702..ee4e5295e7 100644
--- a/synapse/res/templates/registration_token.html
+++ b/synapse/res/templates/registration_token.html
@@ -1,11 +1,11 @@
-<html>
-<head>
-<title>Authentication</title>
-<meta name='viewport' content='width=device-width, initial-scale=1,
- user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+{% extends "_base.html" %}
+{% block title %}Authentication{% endblock %}
+
+{% block header %}
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
-</head>
-<body>
+{% endblock %}
+
+{% block body %}
<form id="registrationForm" method="post" action="{{ myurl }}">
<div>
{% if error is defined %}
@@ -19,5 +19,4 @@
<input type="submit" value="Authenticate" />
</div>
</form>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html
index c3e4deed93..b85d96cc74 100644
--- a/synapse/res/templates/sso_account_deactivated.html
+++ b/synapse/res/templates/sso_account_deactivated.html
@@ -1,25 +1,25 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>SSO account deactivated</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
- </style>
- </head>
- <body class="error_page">
- <header>
- <h1>Your account has been deactivated</h1>
- <p>
- <strong>No account found</strong>
- </p>
- <p>
- Your account might have been deactivated by the server administrator.
- You can either try to create a new account or contact the server’s
- administrator.
- </p>
- </header>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+{% extends "_base.html" %}
+{% block title %}SSO account deactivated{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<div class="error_page">
+ <header>
+ <h1>Your account has been deactivated</h1>
+ <p>
+ <strong>No account found</strong>
+ </p>
+ <p>
+ Your account might have been deactivated by the server administrator.
+ You can either try to create a new account or contact the server’s
+ administrator.
+ </p>
+ </header>
+</div>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index cf72df0a2a..11636d7f5d 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -1,188 +1,186 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <title>Create your account</title>
- <meta charset="utf-8">
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <script type="text/javascript">
- let wasKeyboard = false;
- document.addEventListener("mousedown", function() { wasKeyboard = false; });
- document.addEventListener("keydown", function() { wasKeyboard = true; });
- document.addEventListener("focusin", function() {
- if (wasKeyboard) {
- document.body.classList.add("keyboard-focus");
- } else {
- document.body.classList.remove("keyboard-focus");
- }
- });
- </script>
- <style type="text/css">
- {% include "sso.css" without context %}
-
- body.keyboard-focus :focus, body.keyboard-focus .username_input:focus-within {
- outline: 3px solid #17191C;
- outline-offset: 4px;
- }
-
- .username_input {
- display: flex;
- border: 2px solid #418DED;
- border-radius: 8px;
- padding: 12px;
- position: relative;
- margin: 16px 0;
- align-items: center;
- font-size: 12px;
- }
-
- .username_input.invalid {
- border-color: #FE2928;
- }
-
- .username_input.invalid input, .username_input.invalid label {
- color: #FE2928;
- }
-
- .username_input div, .username_input input {
- line-height: 18px;
- font-size: 14px;
- }
-
- .username_input label {
- position: absolute;
- top: -5px;
- left: 14px;
- font-size: 10px;
- line-height: 10px;
- background: white;
- padding: 0 2px;
- }
-
- .username_input input {
- flex: 1;
- display: block;
- min-width: 0;
- border: none;
- }
-
- /* only clear the outline if we know it will be shown on the parent div using :focus-within */
- @supports selector(:focus-within) {
- .username_input input {
- outline: none !important;
- }
- }
-
- .username_input div {
- color: #8D99A5;
- }
-
- .idp-pick-details {
- border: 1px solid #E9ECF1;
- border-radius: 8px;
- margin: 24px 0;
- }
-
- .idp-pick-details h2 {
- margin: 0;
- padding: 8px 12px;
- }
-
- .idp-pick-details .idp-detail {
- border-top: 1px solid #E9ECF1;
- padding: 12px;
- display: block;
- }
- .idp-pick-details .check-row {
- display: flex;
- align-items: center;
- }
-
- .idp-pick-details .check-row .name {
- flex: 1;
- }
-
- .idp-pick-details .use, .idp-pick-details .idp-value {
- color: #737D8C;
- }
-
- .idp-pick-details .idp-value {
- margin: 0;
- margin-top: 8px;
- }
-
- .idp-pick-details .avatar {
- width: 53px;
- height: 53px;
- border-radius: 100%;
- display: block;
- margin-top: 8px;
- }
-
- output {
- padding: 0 14px;
- display: block;
- }
-
- output.error {
- color: #FE2928;
- }
- </style>
- </head>
- <body>
- <header>
- <h1>Create your account</h1>
- <p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
- </header>
- <main>
- <form method="post" class="form__input" id="form">
- <div class="username_input" id="username_input">
- <label for="field-username">Username (required)</label>
- <div class="prefix">@</div>
- <input type="text" name="username" id="field-username" value="{{ user_attributes.localpart }}" autofocus autocorrect="off" autocapitalize="none">
- <div class="postfix">:{{ server_name }}</div>
+{% extends "_base.html" %}
+{% block title %}Create your account{% endblock %}
+
+{% block header %}
+<script type="text/javascript">
+ let wasKeyboard = false;
+ document.addEventListener("mousedown", function() { wasKeyboard = false; });
+ document.addEventListener("keydown", function() { wasKeyboard = true; });
+ document.addEventListener("focusin", function() {
+ if (wasKeyboard) {
+ document.body.classList.add("keyboard-focus");
+ } else {
+ document.body.classList.remove("keyboard-focus");
+ }
+ });
+</script>
+<style type="text/css">
+ {% include "sso.css" without context %}
+
+ body.keyboard-focus :focus, body.keyboard-focus .username_input:focus-within {
+ outline: 3px solid #17191C;
+ outline-offset: 4px;
+ }
+
+ .username_input {
+ display: flex;
+ border: 2px solid #418DED;
+ border-radius: 8px;
+ padding: 12px;
+ position: relative;
+ margin: 16px 0;
+ align-items: center;
+ font-size: 12px;
+ }
+
+ .username_input.invalid {
+ border-color: #FE2928;
+ }
+
+ .username_input.invalid input, .username_input.invalid label {
+ color: #FE2928;
+ }
+
+ .username_input div, .username_input input {
+ line-height: 18px;
+ font-size: 14px;
+ }
+
+ .username_input label {
+ position: absolute;
+ top: -5px;
+ left: 14px;
+ font-size: 10px;
+ line-height: 10px;
+ background: white;
+ padding: 0 2px;
+ }
+
+ .username_input input {
+ flex: 1;
+ display: block;
+ min-width: 0;
+ border: none;
+ }
+
+ /* only clear the outline if we know it will be shown on the parent div using :focus-within */
+ @supports selector(:focus-within) {
+ .username_input input {
+ outline: none !important;
+ }
+ }
+
+ .username_input div {
+ color: #8D99A5;
+ }
+
+ .idp-pick-details {
+ border: 1px solid #E9ECF1;
+ border-radius: 8px;
+ margin: 24px 0;
+ }
+
+ .idp-pick-details h2 {
+ margin: 0;
+ padding: 8px 12px;
+ }
+
+ .idp-pick-details .idp-detail {
+ border-top: 1px solid #E9ECF1;
+ padding: 12px;
+ display: block;
+ }
+ .idp-pick-details .check-row {
+ display: flex;
+ align-items: center;
+ }
+
+ .idp-pick-details .check-row .name {
+ flex: 1;
+ }
+
+ .idp-pick-details .use, .idp-pick-details .idp-value {
+ color: #737D8C;
+ }
+
+ .idp-pick-details .idp-value {
+ margin: 0;
+ margin-top: 8px;
+ }
+
+ .idp-pick-details .avatar {
+ width: 53px;
+ height: 53px;
+ border-radius: 100%;
+ display: block;
+ margin-top: 8px;
+ }
+
+ output {
+ padding: 0 14px;
+ display: block;
+ }
+
+ output.error {
+ color: #FE2928;
+ }
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Create your account</h1>
+ <p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
+</header>
+<main>
+ <form method="post" class="form__input" id="form">
+ <div class="username_input" id="username_input">
+ <label for="field-username">Username (required)</label>
+ <div class="prefix">@</div>
+ <input type="text" name="username" id="field-username" value="{{ user_attributes.localpart }}" autofocus autocorrect="off" autocapitalize="none">
+ <div class="postfix">:{{ server_name }}</div>
+ </div>
+ <output for="username_input" id="field-username-output"></output>
+ <input type="submit" value="Continue" class="primary-button">
+ {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
+ <section class="idp-pick-details">
+ <h2>{% if idp.idp_icon %}<img src="{{ idp.idp_icon | mxc_to_http(24, 24) }}"/>{% endif %}Optional data from {{ idp.idp_name }}</h2>
+ {% if user_attributes.avatar_url %}
+ <label class="idp-detail idp-avatar" for="idp-avatar">
+ <div class="check-row">
+ <span class="name">Avatar</span>
+ <span class="use">Use</span>
+ <input type="checkbox" name="use_avatar" id="idp-avatar" value="true" checked>
</div>
- <output for="username_input" id="field-username-output"></output>
- <input type="submit" value="Continue" class="primary-button">
- {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
- <section class="idp-pick-details">
- <h2>{% if idp.idp_icon %}<img src="{{ idp.idp_icon | mxc_to_http(24, 24) }}"/>{% endif %}Optional data from {{ idp.idp_name }}</h2>
- {% if user_attributes.avatar_url %}
- <label class="idp-detail idp-avatar" for="idp-avatar">
- <div class="check-row">
- <span class="name">Avatar</span>
- <span class="use">Use</span>
- <input type="checkbox" name="use_avatar" id="idp-avatar" value="true" checked>
- </div>
- <img src="{{ user_attributes.avatar_url }}" class="avatar" />
- </label>
- {% endif %}
- {% if user_attributes.display_name %}
- <label class="idp-detail" for="idp-displayname">
- <div class="check-row">
- <span class="name">Display name</span>
- <span class="use">Use</span>
- <input type="checkbox" name="use_display_name" id="idp-displayname" value="true" checked>
- </div>
- <p class="idp-value">{{ user_attributes.display_name }}</p>
- </label>
- {% endif %}
- {% for email in user_attributes.emails %}
- <label class="idp-detail" for="idp-email{{ loop.index }}">
- <div class="check-row">
- <span class="name">E-mail</span>
- <span class="use">Use</span>
- <input type="checkbox" name="use_email" id="idp-email{{ loop.index }}" value="{{ email }}" checked>
- </div>
- <p class="idp-value">{{ email }}</p>
- </label>
- {% endfor %}
- </section>
- {% endif %}
- </form>
- </main>
- {% include "sso_footer.html" without context %}
- <script type="text/javascript">
- {% include "sso_auth_account_details.js" without context %}
- </script>
- </body>
-</html>
+ <img src="{{ user_attributes.avatar_url }}" class="avatar" />
+ </label>
+ {% endif %}
+ {% if user_attributes.display_name %}
+ <label class="idp-detail" for="idp-displayname">
+ <div class="check-row">
+ <span class="name">Display name</span>
+ <span class="use">Use</span>
+ <input type="checkbox" name="use_display_name" id="idp-displayname" value="true" checked>
+ </div>
+ <p class="idp-value">{{ user_attributes.display_name }}</p>
+ </label>
+ {% endif %}
+ {% for email in user_attributes.emails %}
+ <label class="idp-detail" for="idp-email{{ loop.index }}">
+ <div class="check-row">
+ <span class="name">E-mail</span>
+ <span class="use">Use</span>
+ <input type="checkbox" name="use_email" id="idp-email{{ loop.index }}" value="{{ email }}" checked>
+ </div>
+ <p class="idp-value">{{ email }}</p>
+ </label>
+ {% endfor %}
+ </section>
+ {% endif %}
+ </form>
+</main>
+{% include "sso_footer.html" without context %}
+<script type="text/javascript">
+ {% include "sso_auth_account_details.js" without context %}
+</script>
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_bad_user.html b/synapse/res/templates/sso_auth_bad_user.html
index da579ffe69..819d79a461 100644
--- a/synapse/res/templates/sso_auth_bad_user.html
+++ b/synapse/res/templates/sso_auth_bad_user.html
@@ -1,26 +1,26 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>Authentication failed</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
- </style>
- </head>
- <body class="error_page">
- <header>
- <h1>That doesn't look right</h1>
- <p>
- <strong>We were unable to validate your {{ server_name }} account</strong>
- via single sign‑on (SSO), because the SSO Identity
- Provider returned different details than when you logged in.
- </p>
- <p>
- Try the operation again, and ensure that you use the same details on
- the Identity Provider as when you log into your account.
- </p>
- </header>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+{% extends "_base.html" %}
+{% block title %}Authentication failed{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<div class="error_page">
+ <header>
+ <h1>That doesn't look right</h1>
+ <p>
+ <strong>We were unable to validate your {{ server_name }} account</strong>
+ via single sign‑on (SSO), because the SSO Identity
+ Provider returned different details than when you logged in.
+ </p>
+ <p>
+ Try the operation again, and ensure that you use the same details on
+ the Identity Provider as when you log into your account.
+ </p>
+ </header>
+</div>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_confirm.html b/synapse/res/templates/sso_auth_confirm.html
index f9d0456f0a..3927d6eda3 100644
--- a/synapse/res/templates/sso_auth_confirm.html
+++ b/synapse/res/templates/sso_auth_confirm.html
@@ -1,29 +1,27 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>Confirm it's you</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
- </style>
- </head>
- <body>
- <header>
- <h1>Confirm it's you to continue</h1>
- <p>
- A client is trying to {{ description }}. To confirm this action
- re-authorize your account with single sign-on.
- </p>
- <p><strong>
- If you did not expect this, your account may be compromised.
- </strong></p>
- </header>
- <main>
- <a href="{{ redirect_url }}" class="primary-button">
- Continue with {{ idp.idp_name }}
- </a>
- </main>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+{% extends "_base.html" %}
+{% block title %}Confirm it's you{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Confirm it's you to continue</h1>
+ <p>
+ A client is trying to {{ description }}. To confirm this action
+ re-authorize your account with single sign-on.
+ </p>
+ <p><strong>
+ If you did not expect this, your account may be compromised.
+ </strong></p>
+</header>
+<main>
+ <a href="{{ redirect_url }}" class="primary-button">
+ Continue with {{ idp.idp_name }}
+ </a>
+</main>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_success.html b/synapse/res/templates/sso_auth_success.html
index 1ed3967e87..afeffb7191 100644
--- a/synapse/res/templates/sso_auth_success.html
+++ b/synapse/res/templates/sso_auth_success.html
@@ -1,28 +1,26 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>Authentication successful</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
- </style>
- <script>
- if (window.onAuthDone) {
- window.onAuthDone();
- } else if (window.opener && window.opener.postMessage) {
- window.opener.postMessage("authDone", "*");
- }
- </script>
- </head>
- <body>
- <header>
- <h1>Thank you</h1>
- <p>
- Now we know it’s you, you can close this window and return to the
- application.
- </p>
- </header>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+{% extends "_base.html" %}
+{% block title %}Authentication successful{% endblock %}
+
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+</style>
+<script>
+ if (window.onAuthDone) {
+ window.onAuthDone();
+ } else if (window.opener && window.opener.postMessage) {
+ window.opener.postMessage("authDone", "*");
+ }
+</script>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Thank you</h1>
+ <p>
+ Now we know it’s you, you can close this window and return to the
+ application.
+ </p>
+</header>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html
index 472309c350..6fa36c11c9 100644
--- a/synapse/res/templates/sso_error.html
+++ b/synapse/res/templates/sso_error.html
@@ -1,18 +1,20 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>Authentication failed</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
+{% extends "_base.html" %}
+{% block title %}Authentication failed{% endblock %}
- #error_code {
- margin-top: 56px;
- }
- </style>
- </head>
- <body class="error_page">
+{% block header %}
+{% if error == "unauthorised" %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+
+ #error_code {
+ margin-top: 56px;
+ }
+</style>
+{% endif %}
+{% endblock %}
+
+{% block body %}
+<div class="error_page">
{# If an error of unauthorised is returned it means we have actively rejected their login #}
{% if error == "unauthorised" %}
<header>
@@ -65,5 +67,5 @@
}
</script>
{% endif %}
-</body>
-</html>
+</div>
+{% endblock %}
diff --git a/synapse/res/templates/sso_login_idp_picker.html b/synapse/res/templates/sso_login_idp_picker.html
index 53b82db84e..58b0b3121c 100644
--- a/synapse/res/templates/sso_login_idp_picker.html
+++ b/synapse/res/templates/sso_login_idp_picker.html
@@ -1,61 +1,60 @@
-<!DOCTYPE html>
-<html lang="en">
- <head>
- <meta charset="UTF-8">
- <title>Choose identity provider</title>
- <style type="text/css">
- {% include "sso.css" without context %}
+{% extends "_base.html" %}
+{% block title %}Choose identity provider{% endblock %}
- .providers {
- list-style: none;
- padding: 0;
- }
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
- .providers li {
- margin: 12px;
- }
+ .providers {
+ list-style: none;
+ padding: 0;
+ }
- .providers a {
- display: block;
- border-radius: 4px;
- border: 1px solid #17191C;
- padding: 8px;
- text-align: center;
- text-decoration: none;
- color: #17191C;
- display: flex;
- align-items: center;
- font-weight: bold;
- }
+ .providers li {
+ margin: 12px;
+ }
- .providers a img {
- width: 24px;
- height: 24px;
- }
- .providers a span {
- flex: 1;
- }
- </style>
- </head>
- <body>
- <header>
- <h1>Log in to {{ server_name }} </h1>
- <p>Choose an identity provider to log in</p>
- </header>
- <main>
- <ul class="providers">
- {% for p in providers %}
- <li>
- <a href="pick_idp?idp={{ p.idp_id }}&redirectUrl={{ redirect_url | urlencode }}">
- {% if p.idp_icon %}
- <img src="{{ p.idp_icon | mxc_to_http(32, 32) }}"/>
- {% endif %}
- <span>{{ p.idp_name }}</span>
- </a>
- </li>
- {% endfor %}
- </ul>
- </main>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+ .providers a {
+ display: block;
+ border-radius: 4px;
+ border: 1px solid #17191C;
+ padding: 8px;
+ text-align: center;
+ text-decoration: none;
+ color: #17191C;
+ display: flex;
+ align-items: center;
+ font-weight: bold;
+ }
+
+ .providers a img {
+ width: 24px;
+ height: 24px;
+ }
+ .providers a span {
+ flex: 1;
+ }
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Log in to {{ server_name }} </h1>
+ <p>Choose an identity provider to log in</p>
+</header>
+<main>
+ <ul class="providers">
+ {% for p in providers %}
+ <li>
+ <a href="pick_idp?idp={{ p.idp_id }}&redirectUrl={{ redirect_url | urlencode }}">
+ {% if p.idp_icon %}
+ <img src="{{ p.idp_icon | mxc_to_http(32, 32) }}"/>
+ {% endif %}
+ <span>{{ p.idp_name }}</span>
+ </a>
+ </li>
+ {% endfor %}
+ </ul>
+</main>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_new_user_consent.html b/synapse/res/templates/sso_new_user_consent.html
index 68c8b9f33a..fda29928d1 100644
--- a/synapse/res/templates/sso_new_user_consent.html
+++ b/synapse/res/templates/sso_new_user_consent.html
@@ -1,32 +1,30 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
- <meta charset="UTF-8">
- <title>Agree to terms and conditions</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
+{% extends "_base.html" %}
+{% block title %}Agree to terms and conditions{% endblock %}
- #consent_form {
- margin-top: 56px;
- }
- </style>
-</head>
- <body>
- <header>
- <h1>Your account is nearly ready</h1>
- <p>Agree to the terms to create your account.</p>
- </header>
- <main>
- {% include "sso_partial_profile.html" %}
- <form method="post" action="{{my_url}}" id="consent_form">
- <p>
- <input id="accepted_version" type="checkbox" name="accepted_version" value="{{ consent_version }}" required>
- <label for="accepted_version">I have read and agree to the <a href="{{ terms_url }}" target="_blank" rel="noopener">terms and conditions</a>.</label>
- </p>
- <input type="submit" class="primary-button" value="Continue"/>
- </form>
- </main>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
+
+ #consent_form {
+ margin-top: 56px;
+ }
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Your account is nearly ready</h1>
+ <p>Agree to the terms to create your account.</p>
+</header>
+<main>
+ {% include "sso_partial_profile.html" %}
+ <form method="post" action="{{my_url}}" id="consent_form">
+ <p>
+ <input id="accepted_version" type="checkbox" name="accepted_version" value="{{ consent_version }}" required>
+ <label for="accepted_version">I have read and agree to the <a href="{{ terms_url }}" target="_blank" rel="noopener">terms and conditions</a>.</label>
+ </p>
+ <input type="submit" class="primary-button" value="Continue"/>
+ </form>
+</main>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_redirect_confirm.html b/synapse/res/templates/sso_redirect_confirm.html
index 1b01471ac8..cc2e7b3a5b 100644
--- a/synapse/res/templates/sso_redirect_confirm.html
+++ b/synapse/res/templates/sso_redirect_confirm.html
@@ -1,40 +1,39 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
- <meta charset="UTF-8">
- <title>Continue to your account</title>
- <meta name="viewport" content="width=device-width, user-scalable=no">
- <style type="text/css">
- {% include "sso.css" without context %}
+{% extends "_base.html" %}
+{% block title %}Continue to your account{% endblock %}
- .confirm-trust {
- margin: 34px 0;
- color: #8D99A5;
- }
- .confirm-trust strong {
- color: #17191C;
- }
+{% block header %}
+<style type="text/css">
+ {% include "sso.css" without context %}
- .confirm-trust::before {
- content: "";
- background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTgiIGhlaWdodD0iMTgiIHZpZXdCb3g9IjAgMCAxOCAxOCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZmlsbC1ydWxlPSJldmVub2RkIiBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik0xNi41IDlDMTYuNSAxMy4xNDIxIDEzLjE0MjEgMTYuNSA5IDE2LjVDNC44NTc4NiAxNi41IDEuNSAxMy4xNDIxIDEuNSA5QzEuNSA0Ljg1Nzg2IDQuODU3ODYgMS41IDkgMS41QzEzLjE0MjEgMS41IDE2LjUgNC44NTc4NiAxNi41IDlaTTcuMjUgOUM3LjI1IDkuNDY1OTYgNy41Njg2OSA5Ljg1NzQ4IDggOS45Njg1VjEyLjM3NUM4IDEyLjkyNzMgOC40NDc3MiAxMy4zNzUgOSAxMy4zNzVIMTAuMTI1QzEwLjY3NzMgMTMuMzc1IDExLjEyNSAxMi45MjczIDExLjEyNSAxMi4zNzVDMTEuMTI1IDExLjgyMjcgMTAuNjc3MyAxMS4zNzUgMTAuMTI1IDExLjM3NUgxMFY5QzEwIDguOTY1NDggOS45OTgyNSA4LjkzMTM3IDkuOTk0ODQgOC44OTc3NkM5Ljk0MzYzIDguMzkzNSA5LjUxNzc3IDggOSA4SDguMjVDNy42OTc3MiA4IDcuMjUgOC40NDc3MiA3LjI1IDlaTTkgNy41QzkuNjIxMzIgNy41IDEwLjEyNSA2Ljk5NjMyIDEwLjEyNSA2LjM3NUMxMC4xMjUgNS43NTM2OCA5LjYyMTMyIDUuMjUgOSA1LjI1QzguMzc4NjggNS4yNSA3Ljg3NSA1Ljc1MzY4IDcuODc1IDYuMzc1QzcuODc1IDYuOTk2MzIgOC4zNzg2OCA3LjUgOSA3LjVaIiBmaWxsPSIjQzFDNkNEIi8+Cjwvc3ZnPgoK');
- background-repeat: no-repeat;
- width: 24px;
- height: 24px;
- display: block;
- float: left;
- }
- </style>
-</head>
- <body>
- <header>
- <h1>Continue to your account</h1>
- </header>
- <main>
- {% include "sso_partial_profile.html" %}
- <p class="confirm-trust">Continuing will grant <strong>{{ display_url }}</strong> access to your account.</p>
- <a href="{{ redirect_url }}" class="primary-button">Continue</a>
- </main>
- {% include "sso_footer.html" without context %}
- </body>
-</html>
+ .confirm-trust {
+ margin: 34px 0;
+ color: #8D99A5;
+ }
+ .confirm-trust strong {
+ color: #17191C;
+ }
+
+ .confirm-trust::before {
+ content: "";
+ background-image: url('data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMTgiIGhlaWdodD0iMTgiIHZpZXdCb3g9IjAgMCAxOCAxOCIgZmlsbD0ibm9uZSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KPHBhdGggZmlsbC1ydWxlPSJldmVub2RkIiBjbGlwLXJ1bGU9ImV2ZW5vZGQiIGQ9Ik0xNi41IDlDMTYuNSAxMy4xNDIxIDEzLjE0MjEgMTYuNSA5IDE2LjVDNC44NTc4NiAxNi41IDEuNSAxMy4xNDIxIDEuNSA5QzEuNSA0Ljg1Nzg2IDQuODU3ODYgMS41IDkgMS41QzEzLjE0MjEgMS41IDE2LjUgNC44NTc4NiAxNi41IDlaTTcuMjUgOUM3LjI1IDkuNDY1OTYgNy41Njg2OSA5Ljg1NzQ4IDggOS45Njg1VjEyLjM3NUM4IDEyLjkyNzMgOC40NDc3MiAxMy4zNzUgOSAxMy4zNzVIMTAuMTI1QzEwLjY3NzMgMTMuMzc1IDExLjEyNSAxMi45MjczIDExLjEyNSAxMi4zNzVDMTEuMTI1IDExLjgyMjcgMTAuNjc3MyAxMS4zNzUgMTAuMTI1IDExLjM3NUgxMFY5QzEwIDguOTY1NDggOS45OTgyNSA4LjkzMTM3IDkuOTk0ODQgOC44OTc3NkM5Ljk0MzYzIDguMzkzNSA5LjUxNzc3IDggOSA4SDguMjVDNy42OTc3MiA4IDcuMjUgOC40NDc3MiA3LjI1IDlaTTkgNy41QzkuNjIxMzIgNy41IDEwLjEyNSA2Ljk5NjMyIDEwLjEyNSA2LjM3NUMxMC4xMjUgNS43NTM2OCA5LjYyMTMyIDUuMjUgOSA1LjI1QzguMzc4NjggNS4yNSA3Ljg3NSA1Ljc1MzY4IDcuODc1IDYuMzc1QzcuODc1IDYuOTk2MzIgOC4zNzg2OCA3LjUgOSA3LjVaIiBmaWxsPSIjQzFDNkNEIi8+Cjwvc3ZnPgoK');
+ background-repeat: no-repeat;
+ width: 24px;
+ height: 24px;
+ display: block;
+ float: left;
+ }
+</style>
+{% endblock %}
+
+{% block body %}
+<header>
+ <h1>Continue to your account</h1>
+</header>
+<main>
+ {% include "sso_partial_profile.html" %}
+ <p class="confirm-trust">Continuing will grant <strong>{{ display_url }}</strong> access to your account.</p>
+ <a href="{{ redirect_url }}" class="primary-button">Continue</a>
+</main>
+{% include "sso_footer.html" without context %}
+
+{% endblock %}
diff --git a/synapse/res/templates/style.css b/synapse/res/templates/style.css
new file mode 100644
index 0000000000..097b235ae5
--- /dev/null
+++ b/synapse/res/templates/style.css
@@ -0,0 +1,29 @@
+html {
+ height: 100%;
+}
+
+body {
+ background: #f9fafb;
+ max-width: 680px;
+ margin: auto;
+ font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
+}
+
+.mx_Header {
+ border-bottom: 3px solid #ddd;
+ margin-bottom: 1rem;
+ padding-top: 1rem;
+ padding-bottom: 1rem;
+ text-align: center;
+}
+
+@media screen and (max-width: 1120px) {
+ body {
+ font-size: 20px;
+ }
+
+ h1 { font-size: 1rem; }
+ h2 { font-size: .9rem; }
+ h3 { font-size: .85rem; }
+ h4 { font-size: .8rem; }
+}
diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html
index 369ff446d2..ffabebdd8b 100644
--- a/synapse/res/templates/terms.html
+++ b/synapse/res/templates/terms.html
@@ -1,11 +1,11 @@
-<html>
-<head>
-<title>Authentication</title>
-<meta name='viewport' content='width=device-width, initial-scale=1,
- user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+{% extends "_base.html" %}
+{% block title %}Authentication{% endblock %}
+
+{% block header %}
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
-</head>
-<body>
+{% endblock %}
+
+{% block body %}
<form id="registrationForm" method="post" action="{{ myurl }}">
<div>
{% if error is defined %}
@@ -19,5 +19,4 @@
<input type="submit" value="Agree" />
</div>
</form>
-</body>
-</html>
+{% endblock %}
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index b712215112..28542cd774 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -30,6 +30,7 @@ from synapse.rest.client import (
keys,
knock,
login as v1_login,
+ login_token_request,
logout,
mutual_rooms,
notifications,
@@ -43,6 +44,7 @@ from synapse.rest.client import (
receipts,
register,
relations,
+ rendezvous,
report_event,
room,
room_batch,
@@ -130,3 +132,5 @@ class ClientRestResource(JsonResource):
# unstable
mutual_rooms.register_servlets(hs, client_resource)
+ login_token_request.register_servlets(hs, client_resource)
+ rendezvous.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index fa3266720b..fb73886df0 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -61,9 +61,11 @@ from synapse.rest.admin.rooms import (
MakeRoomAdminRestServlet,
RoomEventContextServlet,
RoomMembersRestServlet,
+ RoomMessagesRestServlet,
RoomRestServlet,
RoomRestV2Servlet,
RoomStateRestServlet,
+ RoomTimestampToEventRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
@@ -78,6 +80,8 @@ from synapse.rest.admin.users import (
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
+ UserByExternalId,
+ UserByThreePid,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
@@ -234,6 +238,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Register all the admin servlets.
"""
+ # Admin servlets aren't registered on workers.
+ if hs.config.worker.worker_app is not None:
+ return
+
register_servlets_for_client_rest_resource(hs, http_server)
BlockRoomRestServlet(hs).register(http_server)
ListRoomRestServlet(hs).register(http_server)
@@ -250,9 +258,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server)
- DeviceRestServlet(hs).register(http_server)
- DevicesRestServlet(hs).register(http_server)
- DeleteDevicesRestServlet(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server)
EventReportsRestServlet(hs).register(http_server)
@@ -271,13 +276,18 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
DestinationResetConnectionRestServlet(hs).register(http_server)
DestinationRestServlet(hs).register(http_server)
ListDestinationsRestServlet(hs).register(http_server)
+ RoomMessagesRestServlet(hs).register(http_server)
+ RoomTimestampToEventRestServlet(hs).register(http_server)
+ UserByExternalId(hs).register(http_server)
+ UserByThreePid(hs).register(http_server)
- # Some servlets only get registered for the main process.
- if hs.config.worker.worker_app is None:
- SendServerNoticeServlet(hs).register(http_server)
- BackgroundUpdateEnabledRestServlet(hs).register(http_server)
- BackgroundUpdateRestServlet(hs).register(http_server)
- BackgroundUpdateStartJobRestServlet(hs).register(http_server)
+ DeviceRestServlet(hs).register(http_server)
+ DevicesRestServlet(hs).register(http_server)
+ DeleteDevicesRestServlet(hs).register(http_server)
+ SendServerNoticeServlet(hs).register(http_server)
+ BackgroundUpdateEnabledRestServlet(hs).register(http_server)
+ BackgroundUpdateRestServlet(hs).register(http_server)
+ BackgroundUpdateStartJobRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(
@@ -286,9 +296,11 @@ def register_servlets_for_client_rest_resource(
"""Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server)
- DeactivateAccountRestServlet(hs).register(http_server)
PurgeHistoryRestServlet(hs).register(http_server)
- ResetPasswordRestServlet(hs).register(http_server)
+ # The following resources can only be run on the main process.
+ if hs.config.worker.worker_app is None:
+ DeactivateAccountRestServlet(hs).register(http_server)
+ ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server)
UserRegisterServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index 399b205aaf..b467a61dfb 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -19,7 +19,7 @@ from typing import Iterable, Pattern
from synapse.api.auth import Auth
from synapse.api.errors import AuthError
from synapse.http.site import SynapseRequest
-from synapse.types import UserID
+from synapse.types import Requester
def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]:
@@ -48,19 +48,19 @@ async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None
AuthError if the requester is not a server admin
"""
requester = await auth.get_user_by_req(request)
- await assert_user_is_admin(auth, requester.user)
+ await assert_user_is_admin(auth, requester)
-async def assert_user_is_admin(auth: Auth, user_id: UserID) -> None:
+async def assert_user_is_admin(auth: Auth, requester: Requester) -> None:
"""Verify that the given user is an admin user
Args:
auth: Auth singleton
- user_id: user to check
+ requester: The user making the request, according to the access token.
Raises:
AuthError if the user is not a server admin
"""
- is_admin = await auth.is_server_admin(user_id)
+ is_admin = await auth.is_server_admin(requester)
if not is_admin:
raise AuthError(HTTPStatus.FORBIDDEN, "You are not a server admin")
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index d934880102..3b2f2d9abb 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -16,6 +16,7 @@ from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import NotFoundError, SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@@ -43,7 +44,9 @@ class DeviceRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine
@@ -112,7 +115,9 @@ class DevicesRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine
@@ -143,7 +148,9 @@ class DeleteDevicesRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
self.store = hs.get_datastores().main
self.is_mine = hs.is_mine
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 19d4a008e8..73470f09ae 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -54,7 +54,7 @@ class QuarantineMediaInRoom(RestServlet):
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
logging.info("Quarantining room: %s", room_id)
@@ -81,7 +81,7 @@ class QuarantineMediaByUser(RestServlet):
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
logging.info("Quarantining media by user: %s", user_id)
@@ -110,7 +110,7 @@ class QuarantineMediaByID(RestServlet):
self, request: SynapseRequest, server_name: str, media_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
logging.info("Quarantining media by ID: %s/%s", server_name, media_id)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 9d953d58de..747e6fda83 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -35,6 +35,7 @@ from synapse.rest.admin._base import (
)
from synapse.storage.databases.main.room import RoomSortOrder
from synapse.storage.state import StateFilter
+from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, RoomID, UserID, create_requester
from synapse.util import json_decoder
@@ -75,7 +76,7 @@ class RoomRestV2Servlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
- await assert_user_is_admin(self._auth, requester.user)
+ await assert_user_is_admin(self._auth, requester)
content = parse_json_object_from_request(request)
@@ -303,6 +304,7 @@ class RoomRestServlet(RestServlet):
members = await self.store.get_users_in_room(room_id)
ret["joined_local_devices"] = await self.store.count_devices_by_users(members)
+ ret["forgotten"] = await self.store.is_locally_forgotten_room(room_id)
return HTTPStatus.OK, ret
@@ -326,7 +328,7 @@ class RoomRestServlet(RestServlet):
pagination_handler: "PaginationHandler",
) -> Tuple[int, JsonDict]:
requester = await auth.get_user_by_req(request)
- await assert_user_is_admin(auth, requester.user)
+ await assert_user_is_admin(auth, requester)
content = parse_json_object_from_request(request)
@@ -460,7 +462,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet):
assert request.args is not None
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
content = parse_json_object_from_request(request)
@@ -550,7 +552,7 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet):
self, request: SynapseRequest, room_identifier: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
content = parse_json_object_from_request(request, allow_empty_body=True)
room_id, _ = await self.resolve_room_id(room_identifier)
@@ -741,7 +743,7 @@ class RoomEventContextServlet(RestServlet):
self, request: SynapseRequest, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=False)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
limit = parse_integer(request, "limit", default=10)
@@ -833,7 +835,7 @@ class BlockRoomRestServlet(RestServlet):
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
- await assert_user_is_admin(self._auth, requester.user)
+ await assert_user_is_admin(self._auth, requester)
content = parse_json_object_from_request(request)
@@ -857,3 +859,106 @@ class BlockRoomRestServlet(RestServlet):
await self._store.unblock_room(room_id)
return HTTPStatus.OK, {"block": block}
+
+
+class RoomMessagesRestServlet(RestServlet):
+ """
+ Get a list of messages from a room.
+ """
+
+ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/messages$")
+
+ def __init__(self, hs: "HomeServer"):
+ self._hs = hs
+ self._clock = hs.get_clock()
+ self._pagination_handler = hs.get_pagination_handler()
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
+
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self._auth.get_user_by_req(request)
+ await assert_user_is_admin(self._auth, requester)
+
+ pagination_config = await PaginationConfig.from_request(
+ self._store, request, default_limit=10
+ )
+ # Twisted will have processed the args by now.
+ assert request.args is not None
+ as_client_event = b"raw" not in request.args
+ filter_str = parse_string(request, "filter", encoding="utf-8")
+ if filter_str:
+ filter_json = urlparse.unquote(filter_str)
+ event_filter: Optional[Filter] = Filter(
+ self._hs, json_decoder.decode(filter_json)
+ )
+ if (
+ event_filter
+ and event_filter.filter_json.get("event_format", "client")
+ == "federation"
+ ):
+ as_client_event = False
+ else:
+ event_filter = None
+
+ msgs = await self._pagination_handler.get_messages(
+ room_id=room_id,
+ requester=requester,
+ pagin_config=pagination_config,
+ as_client_event=as_client_event,
+ event_filter=event_filter,
+ use_admin_priviledge=True,
+ )
+
+ return HTTPStatus.OK, msgs
+
+
+class RoomTimestampToEventRestServlet(RestServlet):
+ """
+ API endpoint to fetch the `event_id` of the closest event to the given
+ timestamp (`ts` query parameter) in the given direction (`dir` query
+ parameter).
+
+ Useful for cases like jump to date so you can start paginating messages from
+ a given date in the archive.
+
+ `ts` is a timestamp in milliseconds where we will find the closest event in
+ the given direction.
+
+ `dir` can be `f` or `b` to indicate forwards and backwards in time from the
+ given timestamp.
+
+ GET /_synapse/admin/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
+ {
+ "event_id": ...
+ }
+ """
+
+ PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/timestamp_to_event$")
+
+ def __init__(self, hs: "HomeServer"):
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
+ self._timestamp_lookup_handler = hs.get_timestamp_lookup_handler()
+
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self._auth.get_user_by_req(request)
+ await assert_user_is_admin(self._auth, requester)
+
+ timestamp = parse_integer(request, "ts", required=True)
+ direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])
+
+ (
+ event_id,
+ origin_server_ts,
+ ) = await self._timestamp_lookup_handler.get_event_for_timestamp(
+ requester, room_id, timestamp, direction
+ )
+
+ return HTTPStatus.OK, {
+ "event_id": event_id,
+ "origin_server_ts": origin_server_ts,
+ }
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index ba2f7fa6d8..6e0c44be2a 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -69,6 +69,7 @@ class UsersRestServletV2(RestServlet):
self.store = hs.get_datastores().main
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
+ self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
@@ -95,6 +96,13 @@ class UsersRestServletV2(RestServlet):
guests = parse_boolean(request, "guests", default=True)
deactivated = parse_boolean(request, "deactivated", default=False)
+ # If support for MSC3866 is not enabled, apply no filtering based on the
+ # `approved` column.
+ if self._msc3866_enabled:
+ approved = parse_boolean(request, "approved", default=True)
+ else:
+ approved = True
+
order_by = parse_string(
request,
"order_by",
@@ -115,8 +123,22 @@ class UsersRestServletV2(RestServlet):
direction = parse_string(request, "dir", default="f", allowed_values=("f", "b"))
users, total = await self.store.get_users_paginate(
- start, limit, user_id, name, guests, deactivated, order_by, direction
+ start,
+ limit,
+ user_id,
+ name,
+ guests,
+ deactivated,
+ order_by,
+ direction,
+ approved,
)
+
+ # If support for MSC3866 is not enabled, don't show the approval flag.
+ if not self._msc3866_enabled:
+ for user in users:
+ del user["approved"]
+
ret = {"users": users, "total": total}
if (start + limit) < total:
ret["next_token"] = str(start + len(users))
@@ -163,6 +185,7 @@ class UserRestServletV2(RestServlet):
self.deactivate_account_handler = hs.get_deactivate_account_handler()
self.registration_handler = hs.get_registration_handler()
self.pusher_pool = hs.get_pusherpool()
+ self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def on_GET(
self, request: SynapseRequest, user_id: str
@@ -183,7 +206,7 @@ class UserRestServletV2(RestServlet):
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
target_user = UserID.from_string(user_id)
body = parse_json_object_from_request(request)
@@ -239,6 +262,15 @@ class UserRestServletV2(RestServlet):
HTTPStatus.BAD_REQUEST, "'deactivated' parameter is not of type boolean"
)
+ approved: Optional[bool] = None
+ if "approved" in body and self._msc3866_enabled:
+ approved = body["approved"]
+ if not isinstance(approved, bool):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "'approved' parameter is not of type boolean",
+ )
+
# convert List[Dict[str, str]] into List[Tuple[str, str]]
if external_ids is not None:
new_external_ids = [
@@ -343,6 +375,9 @@ class UserRestServletV2(RestServlet):
if "user_type" in body:
await self.store.set_user_type(target_user, user_type)
+ if approved is not None:
+ await self.store.update_user_approval_status(target_user, approved)
+
user = await self.admin_handler.get_user(target_user)
assert user is not None
@@ -355,6 +390,10 @@ class UserRestServletV2(RestServlet):
if password is not None:
password_hash = await self.auth_handler.hash(password)
+ new_user_approved = True
+ if self._msc3866_enabled and approved is not None:
+ new_user_approved = approved
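+ # Admin-created users default to approved; an explicit "approved" value in
+ # the request only takes effect when MSC3866 support is enabled.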
+
user_id = await self.registration_handler.register_user(
localpart=target_user.localpart,
password_hash=password_hash,
@@ -362,6 +401,7 @@ class UserRestServletV2(RestServlet):
default_display_name=displayname,
user_type=user_type,
by_admin=True,
+ approved=new_user_approved,
)
if threepids is not None:
@@ -375,7 +415,7 @@ class UserRestServletV2(RestServlet):
and self.hs.config.email.email_notif_for_new_users
and medium == "email"
):
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
access_token=None,
kind="email",
@@ -383,7 +423,7 @@ class UserRestServletV2(RestServlet):
app_display_name="Email Notifications",
device_display_name=address,
pushkey=address,
- lang=None, # We don't know a user's language here
+ lang=None,
data={},
)
@@ -550,6 +590,7 @@ class UserRegisterServlet(RestServlet):
user_type=user_type,
default_display_name=displayname,
by_admin=True,
+ approved=True,
)
result = await register._create_registration_details(user_id, body)
@@ -575,10 +616,9 @@ class WhoisRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
target_user = UserID.from_string(user_id)
requester = await self.auth.get_user_by_req(request)
- auth_user = requester.user
- if target_user != auth_user:
- await assert_user_is_admin(self.auth, auth_user)
+ if target_user != requester.user:
+ await assert_user_is_admin(self.auth, requester)
if not self.is_mine(target_user):
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only whois a local user")
@@ -601,7 +641,7 @@ class DeactivateAccountRestServlet(RestServlet):
self, request: SynapseRequest, target_user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
if not self.is_mine(UserID.from_string(target_user_id)):
raise SynapseError(
@@ -693,7 +733,7 @@ class ResetPasswordRestServlet(RestServlet):
This needs user to have administrator access in Synapse.
"""
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
UserID.from_string(target_user_id)
@@ -807,7 +847,7 @@ class UserAdminServlet(RestServlet):
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
auth_user = requester.user
target_user = UserID.from_string(user_id)
@@ -863,8 +903,9 @@ class PushersRestServlet(RestServlet):
@user:server/pushers
Returns:
- pushers: Dictionary containing pushers information.
- total: Number of pushers in dictionary `pushers`.
+ A dictionary with keys:
+ pushers: Dictionary containing pushers information.
+ total: Number of pushers in dictionary `pushers`.
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")
@@ -921,7 +962,7 @@ class UserTokenRestServlet(RestServlet):
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- await assert_user_is_admin(self.auth, requester.user)
+ await assert_user_is_admin(self.auth, requester)
auth_user = requester.user
if not self.is_mine_id(user_id):
@@ -1157,3 +1198,55 @@ class AccountDataRestServlet(RestServlet):
"rooms": by_room_data,
},
}
+
+
+class UserByExternalId(RestServlet):
+ """Find a user based on an external ID from an auth provider"""
+
+ PATTERNS = admin_patterns(
+ "/auth_providers/(?P<provider>[^/]*)/users/(?P<external_id>[^/]*)"
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
+
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ provider: str,
+ external_id: str,
+ ) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self._auth, request)
+
+ user_id = await self._store.get_user_by_external_id(provider, external_id)
+
+ if user_id is None:
+ raise NotFoundError("User not found")
+
+ return HTTPStatus.OK, {"user_id": user_id}
+
+
+class UserByThreePid(RestServlet):
+ """Find a user based on 3PID of a particular medium"""
+
+ PATTERNS = admin_patterns("/threepid/(?P<medium>[^/]*)/users/(?P<address>[^/]*)")
+
+ def __init__(self, hs: "HomeServer"):
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
+
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ medium: str,
+ address: str,
+ ) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self._auth, request)
+
+ user_id = await self._store.get_user_id_by_threepid(medium, address)
+
+ if user_id is None:
+ raise NotFoundError("User not found")
+
+ return HTTPStatus.OK, {"user_id": user_id}
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 50edc6b7d3..44f622bcce 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -15,10 +15,12 @@
# limitations under the License.
import logging
import random
-from http import HTTPStatus
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib.parse import urlparse
+from pydantic import StrictBool, StrictStr, constr
+from typing_extensions import Literal
+
from twisted.web.server import Request
from synapse.api.constants import LoginType
@@ -28,18 +30,25 @@ from synapse.api.errors import (
SynapseError,
ThreepidValidationError,
)
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
+ parse_and_validate_json_object_from_request,
parse_json_object_from_request,
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
+from synapse.rest.client.models import (
+ AuthenticationData,
+ ClientSecretStr,
+ EmailRequestTokenBody,
+ MsisdnRequestTokenBody,
+)
+from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import assert_valid_client_secret, random_string
@@ -64,7 +73,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
self.config = hs.config
self.identity_handler = hs.get_identity_handler()
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.can_verify_email:
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
@@ -73,41 +82,24 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.email.local_threepid_handling_disabled_due_to_email_config:
- logger.warning(
- "User password resets have been disabled due to lack of email config"
- )
+ if not self.config.email.can_verify_email:
+ logger.warning(
+ "User password resets have been disabled due to lack of email config"
+ )
raise SynapseError(
400, "Email-based password resets have been disabled on this server"
)
- body = parse_json_object_from_request(request)
-
- assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
-
- # Extract params from body
- client_secret = body["client_secret"]
- assert_valid_client_secret(client_secret)
-
- # Canonicalise the email address. The addresses are all stored canonicalised
- # in the database. This allows the user to reset his password without having to
- # know the exact spelling (eg. upper and lower case) of address in the database.
- # Stored in the database "foo@bar.com"
- # User requests with "FOO@bar.com" would raise a Not Found error
- try:
- email = validate_email(body["email"])
- except ValueError as e:
- raise SynapseError(400, str(e))
- send_attempt = body["send_attempt"]
- next_link = body.get("next_link") # Optional param
+ body = parse_and_validate_json_object_from_request(
+ request, EmailRequestTokenBody
+ )
- if next_link:
+ if body.next_link:
# Raise if the provided next_link value isn't valid
- assert_valid_next_link(self.hs, next_link)
+ assert_valid_next_link(self.hs, body.next_link)
await self.identity_handler.ratelimit_request_token_requests(
- request, "email", email
+ request, "email", body.email
)
# The email will be sent to the stored address.
@@ -115,7 +107,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
# an email address which is controlled by the attacker but which, after
# canonicalisation, matches the one in our database.
existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid(
- "email", email
+ "email", body.email
)
if existing_user_id is None:
@@ -129,35 +121,20 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.registration.account_threepid_delegate_email
-
- # Have the configured identity server handle the request
- ret = await self.identity_handler.request_email_token(
- self.hs.config.registration.account_threepid_delegate_email,
- email,
- client_secret,
- send_attempt,
- next_link,
- )
- else:
- # Send password reset emails from Synapse
- sid = await self.identity_handler.send_threepid_validation(
- email,
- client_secret,
- send_attempt,
- self.mailer.send_password_reset_mail,
- next_link,
- )
-
- # Wrap the session id in a JSON object
- ret = {"sid": sid}
-
+ # Send password reset emails from Synapse
+ sid = await self.identity_handler.send_threepid_validation(
+ body.email,
+ body.client_secret,
+ body.send_attempt,
+ self.mailer.send_password_reset_mail,
+ body.next_link,
+ )
threepid_send_requests.labels(type="email", reason="password_reset").observe(
- send_attempt
+ body.send_attempt
)
- return 200, ret
+ # Wrap the session id in a JSON object
+ return 200, {"sid": sid}
class PasswordRestServlet(RestServlet):
@@ -172,16 +149,23 @@ class PasswordRestServlet(RestServlet):
self.password_policy_handler = hs.get_password_policy_handler()
self._set_password_handler = hs.get_set_password_handler()
+ class PostBody(RequestBodyModel):
+ auth: Optional[AuthenticationData] = None
+ logout_devices: StrictBool = True
+ if TYPE_CHECKING:
+ # workaround for https://github.com/samuelcolvin/pydantic/issues/156
+ new_password: Optional[StrictStr] = None
+ else:
+ new_password: Optional[constr(max_length=512, strict=True)] = None
+
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- body = parse_json_object_from_request(request)
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
# we do basic sanity checks here because the auth layer will store these
# in sessions. Pull out the new password provided to us.
- new_password = body.pop("new_password", None)
+ new_password = body.new_password
if new_password is not None:
- if not isinstance(new_password, str) or len(new_password) > 512:
- raise SynapseError(400, "Invalid password")
self.password_policy_handler.validate_password(new_password)
# there are two possibilities here. Either the user does not have an
@@ -201,7 +185,7 @@ class PasswordRestServlet(RestServlet):
params, session_id = await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
- body,
+ body.dict(exclude_unset=True),
"modify your account password",
)
except InteractiveAuthIncompleteError as e:
@@ -224,7 +208,7 @@ class PasswordRestServlet(RestServlet):
result, params, session_id = await self.auth_handler.check_ui_auth(
[[LoginType.EMAIL_IDENTITY]],
request,
- body,
+ body.dict(exclude_unset=True),
"modify your account password",
)
except InteractiveAuthIncompleteError as e:
@@ -299,37 +283,33 @@ class DeactivateAccountRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self._deactivate_account_handler = hs.get_deactivate_account_handler()
+ class PostBody(RequestBodyModel):
+ auth: Optional[AuthenticationData] = None
+ id_server: Optional[StrictStr] = None
+ # Not specced, see https://github.com/matrix-org/matrix-spec/issues/297
+ erase: StrictBool = False
+
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- body = parse_json_object_from_request(request)
- erase = body.get("erase", False)
- if not isinstance(erase, bool):
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Param 'erase' must be a boolean, if given",
- Codes.BAD_JSON,
- )
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
requester = await self.auth.get_user_by_req(request)
# allow ASes to deactivate their own users
if requester.app_service:
await self._deactivate_account_handler.deactivate_account(
- requester.user.to_string(), erase, requester
+ requester.user.to_string(), body.erase, requester
)
return 200, {}
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
- body,
+ body.dict(exclude_unset=True),
"deactivate your account",
)
result = await self._deactivate_account_handler.deactivate_account(
- requester.user.to_string(),
- erase,
- requester,
- id_server=body.get("id_server"),
+ requester.user.to_string(), body.erase, requester, id_server=body.id_server
)
if result:
id_server_unbind_result = "success"
@@ -349,7 +329,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.store = self.hs.get_datastores().main
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.can_verify_email:
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
@@ -358,34 +338,20 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.email.local_threepid_handling_disabled_due_to_email_config:
- logger.warning(
- "Adding emails have been disabled due to lack of an email config"
- )
+ if not self.config.email.can_verify_email:
+ logger.warning(
+ "Adding emails have been disabled due to lack of an email config"
+ )
raise SynapseError(
- 400, "Adding an email to your account is disabled on this server"
+ 400,
+ "Adding an email to your account is disabled on this server",
)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
- client_secret = body["client_secret"]
- assert_valid_client_secret(client_secret)
-
- # Canonicalise the email address. The addresses are all stored canonicalised
- # in the database.
- # This ensures that the validation email is sent to the canonicalised address
- # as it will later be entered into the database.
- # Otherwise the email will be sent to "FOO@bar.com" and stored as
- # "foo@bar.com" in database.
- try:
- email = validate_email(body["email"])
- except ValueError as e:
- raise SynapseError(400, str(e))
- send_attempt = body["send_attempt"]
- next_link = body.get("next_link") # Optional param
+ body = parse_and_validate_json_object_from_request(
+ request, EmailRequestTokenBody
+ )
- if not await check_3pid_allowed(self.hs, "email", email):
+ if not await check_3pid_allowed(self.hs, "email", body.email):
raise SynapseError(
403,
"Your email domain is not authorized on this server",
@@ -393,14 +359,14 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
)
await self.identity_handler.ratelimit_request_token_requests(
- request, "email", email
+ request, "email", body.email
)
- if next_link:
+ if body.next_link:
# Raise if the provided next_link value isn't valid
- assert_valid_next_link(self.hs, next_link)
+ assert_valid_next_link(self.hs, body.next_link)
- existing_user_id = await self.store.get_user_id_by_threepid("email", email)
+ existing_user_id = await self.store.get_user_id_by_threepid("email", body.email)
if existing_user_id is not None:
if self.config.server.request_token_inhibit_3pid_errors:
@@ -413,35 +379,21 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.registration.account_threepid_delegate_email
-
- # Have the configured identity server handle the request
- ret = await self.identity_handler.request_email_token(
- self.hs.config.registration.account_threepid_delegate_email,
- email,
- client_secret,
- send_attempt,
- next_link,
- )
- else:
- # Send threepid validation emails from Synapse
- sid = await self.identity_handler.send_threepid_validation(
- email,
- client_secret,
- send_attempt,
- self.mailer.send_add_threepid_mail,
- next_link,
- )
-
- # Wrap the session id in a JSON object
- ret = {"sid": sid}
+ # Send threepid validation emails from Synapse
+ sid = await self.identity_handler.send_threepid_validation(
+ body.email,
+ body.client_secret,
+ body.send_attempt,
+ self.mailer.send_add_threepid_mail,
+ body.next_link,
+ )
threepid_send_requests.labels(type="email", reason="add_threepid").observe(
- send_attempt
+ body.send_attempt
)
- return 200, ret
+ # Wrap the session id in a JSON object
+ return 200, {"sid": sid}
class MsisdnThreepidRequestTokenRestServlet(RestServlet):
@@ -454,23 +406,16 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- body = parse_json_object_from_request(request)
- assert_params_in_dict(
- body, ["client_secret", "country", "phone_number", "send_attempt"]
+ body = parse_and_validate_json_object_from_request(
+ request, MsisdnRequestTokenBody
)
- client_secret = body["client_secret"]
- assert_valid_client_secret(client_secret)
-
- country = body["country"]
- phone_number = body["phone_number"]
- send_attempt = body["send_attempt"]
- next_link = body.get("next_link") # Optional param
-
- msisdn = phone_number_to_msisdn(country, phone_number)
+ msisdn = phone_number_to_msisdn(body.country, body.phone_number)
if not await check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
403,
+ # TODO: is this error message accurate? Looks like we've only rejected
+ # this phone number, not necessarily all phone numbers
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)
@@ -479,9 +424,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
request, "msisdn", msisdn
)
- if next_link:
+ if body.next_link:
# Raise if the provided next_link value isn't valid
- assert_valid_next_link(self.hs, next_link)
+ assert_valid_next_link(self.hs, body.next_link)
existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)
@@ -508,15 +453,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
ret = await self.identity_handler.requestMsisdnToken(
self.hs.config.registration.account_threepid_delegate_msisdn,
- country,
- phone_number,
- client_secret,
- send_attempt,
- next_link,
+ body.country,
+ body.phone_number,
+ body.client_secret,
+ body.send_attempt,
+ body.next_link,
)
threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe(
- send_attempt
+ body.send_attempt
)
return 200, ret
@@ -534,24 +479,18 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.can_verify_email:
self._failure_email_template = (
self.config.email.email_add_threepid_template_failure_html
)
async def on_GET(self, request: Request) -> None:
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.email.local_threepid_handling_disabled_due_to_email_config:
- logger.warning(
- "Adding emails have been disabled due to lack of an email config"
- )
- raise SynapseError(
- 400, "Adding an email to your account is disabled on this server"
+ if not self.config.email.can_verify_email:
+ logger.warning(
+ "Adding emails have been disabled due to lack of an email config"
)
- elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
raise SynapseError(
- 400,
- "This homeserver is not validating threepids.",
+ 400, "Adding an email to your account is disabled on this server"
)
sid = parse_string(request, "sid", required=True)
@@ -595,6 +534,11 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"/add_threepid/msisdn/submit_token$", releases=(), unstable=True
)
+ class PostBody(RequestBodyModel):
+ client_secret: ClientSecretStr
+ sid: StrictStr
+ token: StrictStr
+
def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
@@ -610,16 +554,14 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"instead.",
)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["client_secret", "sid", "token"])
- assert_valid_client_secret(body["client_secret"])
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
# Proxy submit_token request to msisdn threepid delegate
response = await self.identity_handler.proxy_msisdn_submit_token(
self.config.registration.account_threepid_delegate_msisdn,
- body["client_secret"],
- body["sid"],
- body["token"],
+ body.client_secret,
+ body.sid,
+ body.token,
)
return 200, response
@@ -642,6 +584,10 @@ class ThreepidRestServlet(RestServlet):
return 200, {"threepids": threepids}
+ # NOTE(dmr): I have chosen not to use Pydantic to parse this request's body, because
+ # the endpoint is deprecated. (If you really want to, you could do this by reusing
+ # ThreepidBindRestServlet.PostBody with an `alias_generator` to handle
+ # `threePidCreds` versus `three_pid_creds`.)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
@@ -690,6 +636,11 @@ class ThreepidAddRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
+ class PostBody(RequestBodyModel):
+ auth: Optional[AuthenticationData] = None
+ client_secret: ClientSecretStr
+ sid: StrictStr
+
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
@@ -699,22 +650,17 @@ class ThreepidAddRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
- body = parse_json_object_from_request(request)
-
- assert_params_in_dict(body, ["client_secret", "sid"])
- sid = body["sid"]
- client_secret = body["client_secret"]
- assert_valid_client_secret(client_secret)
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
- body,
+ body.dict(exclude_unset=True),
"add a third-party identifier to your account",
)
validation_session = await self.identity_handler.validate_threepid_session(
- client_secret, sid
+ body.client_secret, body.sid
)
if validation_session:
await self.auth_handler.add_threepid(
@@ -739,23 +685,20 @@ class ThreepidBindRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
- async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- body = parse_json_object_from_request(request)
+ class PostBody(RequestBodyModel):
+ client_secret: ClientSecretStr
+ id_access_token: StrictStr
+ id_server: StrictStr
+ sid: StrictStr
- assert_params_in_dict(
- body, ["id_server", "sid", "id_access_token", "client_secret"]
- )
- id_server = body["id_server"]
- sid = body["sid"]
- id_access_token = body["id_access_token"]
- client_secret = body["client_secret"]
- assert_valid_client_secret(client_secret)
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
await self.identity_handler.bind_threepid(
- client_secret, sid, user_id, id_server, id_access_token
+ body.client_secret, body.sid, user_id, body.id_server, body.id_access_token
)
return 200, {}
@@ -771,23 +714,27 @@ class ThreepidUnbindRestServlet(RestServlet):
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastores().main
+ class PostBody(RequestBodyModel):
+ address: StrictStr
+ id_server: Optional[StrictStr] = None
+ medium: Literal["email", "msisdn"]
+
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
requester = await self.auth.get_user_by_req(request)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["medium", "address"])
-
- medium = body.get("medium")
- address = body.get("address")
- id_server = body.get("id_server")
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
# Attempt to unbind the threepid from an identity server. If id_server is None, try to
# unbind from all identity servers this threepid has been added to in the past
result = await self.identity_handler.try_unbind_threepid(
requester.user.to_string(),
- {"address": address, "medium": medium, "id_server": id_server},
+ {
+ "address": body.address,
+ "medium": body.medium,
+ "id_server": body.id_server,
+ },
)
return 200, {"id_server_unbind_result": "success" if result else "no-support"}
@@ -801,21 +748,25 @@ class ThreepidDeleteRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
+ class PostBody(RequestBodyModel):
+ address: StrictStr
+ id_server: Optional[StrictStr] = None
+ medium: Literal["email", "msisdn"]
+
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["medium", "address"])
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
try:
ret = await self.auth_handler.delete_threepid(
- user_id, body["medium"], body["address"], body.get("id_server")
+ user_id, body.medium, body.address, body.id_server
)
except Exception:
# NB. This endpoint should succeed if there is nothing to
@@ -905,17 +856,18 @@ class AccountStatusRestServlet(RestServlet):
self._auth = hs.get_auth()
self._account_handler = hs.get_account_handler()
+ class PostBody(RequestBodyModel):
+ # TODO: we could validate that each user id is an mxid here, and/or parse it
+ # as a UserID
+ user_ids: List[StrictStr]
+
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self._auth.get_user_by_req(request)
- body = parse_json_object_from_request(request)
- if "user_ids" not in body:
- raise SynapseError(
- 400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
- )
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
statuses, failures = await self._account_handler.get_account_statuses(
- body["user_ids"],
+ body.user_ids,
allow_remote=True,
)
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
index 4237071c61..e84dde31b1 100644
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -77,6 +77,11 @@ class CapabilitiesRestServlet(RestServlet):
"enabled": True,
}
+ if self.config.experimental.msc3664_enabled:
+ response["capabilities"]["im.nheko.msc3664.related_event_match"] = {
+ "enabled": self.config.experimental.msc3664_enabled,
+ }
+
return HTTPStatus.OK, response
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index 6fab102437..69b803f9f8 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -14,18 +14,22 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, List, Optional, Tuple
+
+from pydantic import Extra, StrictStr
from synapse.api import errors
from synapse.api.errors import NotFoundError
+from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
- assert_params_in_dict,
- parse_json_object_from_request,
+ parse_and_validate_json_object_from_request,
)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns, interactive_auth_handler
+from synapse.rest.client.models import AuthenticationData
+from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict
if TYPE_CHECKING:
@@ -42,12 +46,26 @@ class DevicesRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.device_handler = hs.get_device_handler()
+ self._msc3852_enabled = hs.config.experimental.msc3852_enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
devices = await self.device_handler.get_devices_by_user(
requester.user.to_string()
)
+
+ # If MSC3852 is disabled, then the "last_seen_user_agent" field will be
+ # removed from each device. If it is enabled, then the field name will
+ # be replaced by the unstable identifier.
+ #
+ # When MSC3852 is accepted, this block of code can just be removed to
+ # expose "last_seen_user_agent" to clients.
+ for device in devices:
+ last_seen_user_agent = device["last_seen_user_agent"]
+ del device["last_seen_user_agent"]
+ if self._msc3852_enabled:
+ device["org.matrix.msc3852.last_seen_user_agent"] = last_seen_user_agent
+
return 200, {"devices": devices}
@@ -63,30 +81,34 @@ class DeleteDevicesRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
self.auth_handler = hs.get_auth_handler()
+ class PostBody(RequestBodyModel):
+ auth: Optional[AuthenticationData]
+ devices: List[StrictStr]
+
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
try:
- body = parse_json_object_from_request(request)
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
except errors.SynapseError as e:
if e.errcode == errors.Codes.NOT_JSON:
- # DELETE
+ # TODO: Can/should we remove this fallback now?
# deal with older clients which didn't pass a JSON dict
# the same as those that pass an empty dict
- body = {}
+ body = self.PostBody.parse_obj({})
else:
raise e
- assert_params_in_dict(body, ["devices"])
-
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
- body,
+ body.dict(exclude_unset=True),
"remove device(s) from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
@@ -94,7 +116,7 @@ class DeleteDevicesRestServlet(RestServlet):
)
await self.device_handler.delete_devices(
- requester.user.to_string(), body["devices"]
+ requester.user.to_string(), body.devices
)
return 200, {}
@@ -106,8 +128,11 @@ class DeviceRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
self.auth_handler = hs.get_auth_handler()
+ self._msc3852_enabled = hs.config.experimental.msc3852_enabled
async def on_GET(
self, request: SynapseRequest, device_id: str
@@ -118,8 +143,23 @@ class DeviceRestServlet(RestServlet):
)
if device is None:
raise NotFoundError("No device found")
+
+ # If MSC3852 is disabled, then the "last_seen_user_agent" field will be
+ # removed from each device. If it is enabled, then the field name will
+ # be replaced by the unstable identifier.
+ #
+ # When MSC3852 is accepted, this block of code can just be removed to
+ # expose "last_seen_user_agent" to clients.
+ last_seen_user_agent = device["last_seen_user_agent"]
+ del device["last_seen_user_agent"]
+ if self._msc3852_enabled:
+ device["org.matrix.msc3852.last_seen_user_agent"] = last_seen_user_agent
+
return 200, device
+ class DeleteBody(RequestBodyModel):
+ auth: Optional[AuthenticationData]
+
@interactive_auth_handler
async def on_DELETE(
self, request: SynapseRequest, device_id: str
@@ -127,20 +167,21 @@ class DeviceRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
try:
- body = parse_json_object_from_request(request)
+ body = parse_and_validate_json_object_from_request(request, self.DeleteBody)
except errors.SynapseError as e:
if e.errcode == errors.Codes.NOT_JSON:
+ # TODO: can/should we remove this fallback now?
# deal with older clients which didn't pass a JSON dict
# the same as those that pass an empty dict
- body = {}
+ body = self.DeleteBody.parse_obj({})
else:
raise
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
- body,
+ body.dict(exclude_unset=True),
"remove a device from your account",
# Users might call this multiple times in a row while cleaning up
# devices, allow a single UI auth session to be re-used.
@@ -152,18 +193,33 @@ class DeviceRestServlet(RestServlet):
)
return 200, {}
+ class PutBody(RequestBodyModel):
+ display_name: Optional[StrictStr]
+
async def on_PUT(
self, request: SynapseRequest, device_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
- body = parse_json_object_from_request(request)
+ body = parse_and_validate_json_object_from_request(request, self.PutBody)
await self.device_handler.update_device(
- requester.user.to_string(), device_id, body
+ requester.user.to_string(), device_id, body.dict()
)
return 200, {}
+class DehydratedDeviceDataModel(RequestBodyModel):
+ """JSON blob describing a dehydrated device to be stored.
+
+ Expects other freeform fields. Use .dict() to access them.
+ """
+
+ class Config:
+ extra = Extra.allow
+
+ algorithm: StrictStr
+
+
class DehydratedDeviceServlet(RestServlet):
"""Retrieve or store a dehydrated device.
@@ -180,7 +236,7 @@ class DehydratedDeviceServlet(RestServlet):
}
}
- PUT /org.matrix.msc2697/dehydrated_device
+ PUT /org.matrix.msc2697.v2/dehydrated_device
Content-Type: application/json
{
@@ -205,7 +261,9 @@ class DehydratedDeviceServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -219,27 +277,18 @@ class DehydratedDeviceServlet(RestServlet):
else:
raise errors.NotFoundError("No dehydrated device available")
+ class PutBody(RequestBodyModel):
+ device_data: DehydratedDeviceDataModel
+ initial_device_display_name: Optional[StrictStr]
+
async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- submission = parse_json_object_from_request(request)
+ submission = parse_and_validate_json_object_from_request(request, self.PutBody)
requester = await self.auth.get_user_by_req(request)
- if "device_data" not in submission:
- raise errors.SynapseError(
- 400,
- "device_data missing",
- errcode=errors.Codes.MISSING_PARAM,
- )
- elif not isinstance(submission["device_data"], dict):
- raise errors.SynapseError(
- 400,
- "device_data must be an object",
- errcode=errors.Codes.INVALID_PARAM,
- )
-
device_id = await self.device_handler.store_dehydrated_device(
requester.user.to_string(),
- submission["device_data"],
- submission.get("initial_device_display_name", None),
+ submission.device_data.dict(),
+ submission.initial_device_display_name,
)
return 200, {"device_id": device_id}
@@ -271,30 +320,22 @@ class ClaimDehydratedDeviceServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_handler = handler
+
+ class PostBody(RequestBodyModel):
+ device_id: StrictStr
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- submission = parse_json_object_from_request(request)
-
- if "device_id" not in submission:
- raise errors.SynapseError(
- 400,
- "device_id missing",
- errcode=errors.Codes.MISSING_PARAM,
- )
- elif not isinstance(submission["device_id"], str):
- raise errors.SynapseError(
- 400,
- "device_id must be a string",
- errcode=errors.Codes.INVALID_PARAM,
- )
+ submission = parse_and_validate_json_object_from_request(request, self.PostBody)
result = await self.device_handler.rehydrate_device(
requester.user.to_string(),
self.auth.get_access_token_from_request(request),
- submission["device_id"],
+ submission.device_id,
)
return 200, result
diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
index bc1b18c92d..f17b4c8d22 100644
--- a/synapse/rest/client/directory.py
+++ b/synapse/rest/client/directory.py
@@ -13,15 +13,22 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, List, Optional, Tuple
+
+from pydantic import StrictStr
+from typing_extensions import Literal
from twisted.web.server import Request
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.servlet import (
+ RestServlet,
+ parse_and_validate_json_object_from_request,
+)
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
+from synapse.rest.models import RequestBodyModel
from synapse.types import JsonDict, RoomAlias
if TYPE_CHECKING:
@@ -54,6 +61,12 @@ class ClientDirectoryServer(RestServlet):
return 200, res
+ class PutBody(RequestBodyModel):
+ # TODO: get Pydantic to validate that this is a valid room id?
+ room_id: StrictStr
+ # `servers` is unspecced
+ servers: Optional[List[StrictStr]] = None
+
async def on_PUT(
self, request: SynapseRequest, room_alias: str
) -> Tuple[int, JsonDict]:
@@ -61,31 +74,22 @@ class ClientDirectoryServer(RestServlet):
raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM)
room_alias_obj = RoomAlias.from_string(room_alias)
- content = parse_json_object_from_request(request)
- if "room_id" not in content:
- raise SynapseError(
- 400, 'Missing params: ["room_id"]', errcode=Codes.BAD_JSON
- )
+ content = parse_and_validate_json_object_from_request(request, self.PutBody)
logger.debug("Got content: %s", content)
logger.debug("Got room name: %s", room_alias_obj.to_string())
- room_id = content["room_id"]
- servers = content["servers"] if "servers" in content else None
-
- logger.debug("Got room_id: %s", room_id)
- logger.debug("Got servers: %s", servers)
+ logger.debug("Got room_id: %s", content.room_id)
+ logger.debug("Got servers: %s", content.servers)
- # TODO(erikj): Check types.
-
- room = await self.store.get_room(room_id)
+ room = await self.store.get_room(content.room_id)
if room is None:
raise SynapseError(400, "Room does not exist")
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.create_association(
- requester, room_alias_obj, room_id, servers
+ requester, room_alias_obj, content.room_id, content.servers
)
return 200, {}
@@ -137,16 +141,18 @@ class ClientDirectoryListServer(RestServlet):
return 200, {"visibility": "public" if room["is_public"] else "private"}
+ class PutBody(RequestBodyModel):
+ visibility: Literal["public", "private"] = "public"
+
async def on_PUT(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- content = parse_json_object_from_request(request)
- visibility = content.get("visibility", "public")
+ content = parse_and_validate_json_object_from_request(request, self.PutBody)
await self.directory_handler.edit_published_room_list(
- requester, room_id, visibility
+ requester, room_id, content.visibility
)
return 200, {}
@@ -163,12 +169,14 @@ class ClientAppserviceDirectoryListServer(RestServlet):
self.directory_handler = hs.get_directory_handler()
self.auth = hs.get_auth()
+ class PutBody(RequestBodyModel):
+ visibility: Literal["public", "private"] = "public"
+
async def on_PUT(
self, request: SynapseRequest, network_id: str, room_id: str
) -> Tuple[int, JsonDict]:
- content = parse_json_object_from_request(request)
- visibility = content.get("visibility", "public")
- return await self._edit(request, network_id, room_id, visibility)
+ content = parse_and_validate_json_object_from_request(request, self.PutBody)
+ return await self._edit(request, network_id, room_id, content.visibility)
async def on_DELETE(
self, request: SynapseRequest, network_id: str, room_id: str
@@ -176,7 +184,11 @@ class ClientAppserviceDirectoryListServer(RestServlet):
return await self._edit(request, network_id, room_id, "private")
async def _edit(
- self, request: SynapseRequest, network_id: str, room_id: str, visibility: str
+ self,
+ request: SynapseRequest,
+ network_id: str,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if not requester.app_service:
diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py
index 916f5230f1..782e7d14e8 100644
--- a/synapse/rest/client/events.py
+++ b/synapse/rest/client/events.py
@@ -50,7 +50,9 @@ class EventStreamRestServlet(RestServlet):
raise SynapseError(400, "Guest users must specify room_id param")
room_id = parse_string(request, "room_id")
- pagin_config = await PaginationConfig.from_request(self.store, request)
+ pagin_config = await PaginationConfig.from_request(
+ self.store, request, default_limit=10
+ )
timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
if b"timeout" in args:
try:
diff --git a/synapse/rest/client/initial_sync.py b/synapse/rest/client/initial_sync.py
index cfadcb8e50..9b1bb8b521 100644
--- a/synapse/rest/client/initial_sync.py
+++ b/synapse/rest/client/initial_sync.py
@@ -39,7 +39,9 @@ class InitialSyncRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
args: Dict[bytes, List[bytes]] = request.args # type: ignore
as_client_event = b"raw" not in args
- pagination_config = await PaginationConfig.from_request(self.store, request)
+ pagination_config = await PaginationConfig.from_request(
+ self.store, request, default_limit=10
+ )
include_archived = parse_boolean(request, "archived", default=False)
content = await self.initial_sync_handler.snapshot_all_rooms(
user_id=requester.user.to_string(),
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index e3f454896a..ee038c7192 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -26,10 +26,11 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname
+from synapse.logging.opentracing import log_kv, set_tag
+from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
+from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.types import JsonDict, StreamToken
-
-from ._base import client_patterns, interactive_auth_handler
+from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -43,24 +44,48 @@ class KeyUploadServlet(RestServlet):
Content-Type: application/json
{
- "device_keys": {
- "user_id": "<user_id>",
- "device_id": "<device_id>",
- "valid_until_ts": <millisecond_timestamp>,
- "algorithms": [
- "m.olm.curve25519-aes-sha2",
- ]
- "keys": {
- "<algorithm>:<device_id>": "<key_base64>",
+ "device_keys": {
+ "user_id": "<user_id>",
+ "device_id": "<device_id>",
+ "valid_until_ts": <millisecond_timestamp>,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ ]
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures:" {
+ "<user_id>" {
+ "<algorithm>:<device_id>": "<signature_base64>"
+ }
+ }
},
- "signatures:" {
- "<user_id>" {
- "<algorithm>:<device_id>": "<signature_base64>"
- } } },
- "one_time_keys": {
- "<algorithm>:<key_id>": "<key_base64>"
- },
+ "fallback_keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ "signed_<algorithm>:<device_id>": {
+ "fallback": true,
+ "key": "<key_base64>",
+ "signatures": {
+ "<user_id>": {
+ "<algorithm>:<device_id>": "<key_base64>"
+ }
+ }
+ }
+ }
+ "one_time_keys": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ },
+ }
+
+ response, e.g.:
+
+ {
+ "one_time_key_counts": {
+ "curve25519": 10,
+ "signed_curve25519": 20
+ }
}
+
"""
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
@@ -71,7 +96,13 @@ class KeyUploadServlet(RestServlet):
self.e2e_keys_handler = hs.get_e2e_keys_handler()
self.device_handler = hs.get_device_handler()
- @trace_with_opname("upload_keys")
+ if hs.config.worker.worker_app is None:
+ # if main process
+ self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
+ else:
+ # then a worker
+ self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
+
async def on_POST(
self, request: SynapseRequest, device_id: Optional[str]
) -> Tuple[int, JsonDict]:
@@ -110,8 +141,8 @@ class KeyUploadServlet(RestServlet):
400, "To upload keys, you must pass device_id when authenticating"
)
- result = await self.e2e_keys_handler.upload_keys_for_user(
- user_id, device_id, body
+ result = await self.key_uploader(
+ user_id=user_id, device_id=device_id, keys=body
)
return 200, result
@@ -157,6 +188,7 @@ class KeyQueryServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ @cancellable
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
@@ -200,6 +232,7 @@ class KeyChangesServlet(RestServlet):
self.device_handler = hs.get_device_handler()
self.store = hs.get_datastores().main
+ @cancellable
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
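The keys.py change above makes KeyUploadServlet choose its upload backend once at construction time: the E2E keys handler on the main process, or a replication client on workers, with the call site switched to keyword arguments so both backends share one signature. A minimal, self-contained sketch of that dispatch pattern (plain Python, not Synapse code; the two coroutines are stand-ins for the real handler and replication client):

import asyncio
from typing import Awaitable, Callable, Dict

JsonDict = Dict[str, object]


async def upload_keys_locally(*, user_id: str, device_id: str, keys: JsonDict) -> JsonDict:
    # Stand-in for e2e_keys_handler.upload_keys_for_user on the main process.
    one_time_keys = keys.get("one_time_keys", {})
    return {"one_time_key_counts": {"signed_curve25519": len(one_time_keys)}}


async def upload_keys_via_replication(*, user_id: str, device_id: str, keys: JsonDict) -> JsonDict:
    # Stand-in for the client built by ReplicationUploadKeysForUserRestServlet.make_client(hs).
    return await upload_keys_locally(user_id=user_id, device_id=device_id, keys=keys)


def pick_key_uploader(is_main_process: bool) -> Callable[..., Awaitable[JsonDict]]:
    # Mirrors the `hs.config.worker.worker_app is None` check in the diff.
    return upload_keys_locally if is_main_process else upload_keys_via_replication


async def demo() -> None:
    uploader = pick_key_uploader(is_main_process=False)
    result = await uploader(
        user_id="@alice:example.org",
        device_id="ABCDEFGH",
        keys={"one_time_keys": {"signed_curve25519:AAAA": "base64+key"}},
    )
    print(result)  # {'one_time_key_counts': {'signed_curve25519': 1}}


asyncio.run(demo())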
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index 0437c87d8d..8adced41e5 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -28,7 +28,14 @@ from typing import (
from typing_extensions import TypedDict
-from synapse.api.errors import Codes, InvalidClientTokenError, LoginError, SynapseError
+from synapse.api.constants import ApprovalNoticeMedium
+from synapse.api.errors import (
+ Codes,
+ InvalidClientTokenError,
+ LoginError,
+ NotApprovedError,
+ SynapseError,
+)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.urls import CLIENT_API_PREFIX
from synapse.appservice import ApplicationService
@@ -55,11 +62,11 @@ logger = logging.getLogger(__name__)
class LoginResponse(TypedDict, total=False):
user_id: str
- access_token: str
+ access_token: Optional[str]
home_server: str
expires_in_ms: Optional[int]
refresh_token: Optional[str]
- device_id: str
+ device_id: Optional[str]
well_known: Optional[Dict[str, Any]]
@@ -92,6 +99,12 @@ class LoginRestServlet(RestServlet):
hs.config.registration.refreshable_access_token_lifetime is not None
)
+ # Whether we need to check if the user has been approved or not.
+ self._require_approval = (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ )
+
self.auth = hs.get_auth()
self.clock = hs.get_clock()
@@ -220,6 +233,14 @@ class LoginRestServlet(RestServlet):
except KeyError:
raise SynapseError(400, "Missing JSON keys.")
+ if self._require_approval:
+ approved = await self.auth_handler.is_user_approved(result["user_id"])
+ if not approved:
+ raise NotApprovedError(
+ msg="This account is pending approval by a server administrator.",
+ approval_notice_medium=ApprovalNoticeMedium.NONE,
+ )
+
well_known_data = self._well_known_builder.get_well_known()
if well_known_data:
result["well_known"] = well_known_data
@@ -329,7 +350,7 @@ class LoginRestServlet(RestServlet):
auth_provider_session_id: The session ID got during login from the SSO IdP.
Returns:
- result: Dictionary of account information after successful login.
+ Dictionary of account information after successful login.
"""
# Before we actually log them in we check if they've already logged in
@@ -356,6 +377,16 @@ class LoginRestServlet(RestServlet):
errcode=Codes.INVALID_PARAM,
)
+ if self._require_approval:
+ approved = await self.auth_handler.is_user_approved(user_id)
+ if not approved:
+ # If the user isn't approved (and needs to be) we won't allow them to
+ # actually log in, so we don't want to create a device/access token.
+ return LoginResponse(
+ user_id=user_id,
+ home_server=self.hs.hostname,
+ )
+
initial_display_name = login_submission.get("initial_device_display_name")
(
device_id,
@@ -405,8 +436,7 @@ class LoginRestServlet(RestServlet):
The body of the JSON response.
"""
token = login_submission["token"]
- auth_handler = self.auth_handler
- res = await auth_handler.validate_short_term_login_token(token)
+ res = await self.auth_handler.consume_login_token(token)
return await self._complete_login(
res.user_id,
@@ -506,7 +536,7 @@ def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict:
class RefreshTokenServlet(RestServlet):
- PATTERNS = (re.compile("^/_matrix/client/v1/refresh$"),)
+ PATTERNS = client_patterns("/refresh$")
def __init__(self, hs: "HomeServer"):
self._auth_handler = hs.get_auth_handler()
diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py
new file mode 100644
index 0000000000..43ea21d5e6
--- /dev/null
+++ b/synapse/rest/client/login_token_request.py
@@ -0,0 +1,95 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns, interactive_auth_handler
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class LoginTokenRequestServlet(RestServlet):
+ """
+ Get a token that can be used with `m.login.token` to log in a second device.
+
+ Request:
+
+ POST /login/token HTTP/1.1
+ Content-Type: application/json
+
+ {}
+
+ Response:
+
+ HTTP/1.1 200 OK
+ {
+ "login_token": "ABDEFGH",
+ "expires_in": 3600,
+ }
+ """
+
+ PATTERNS = client_patterns(
+ "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+ self.server_name = hs.config.server.server_name
+ self.auth_handler = hs.get_auth_handler()
+ self.token_timeout = hs.config.experimental.msc3882_token_timeout
+ self.ui_auth = hs.config.experimental.msc3882_ui_auth
+
+ @interactive_auth_handler
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ body = parse_json_object_from_request(request)
+
+ if self.ui_auth:
+ await self.auth_handler.validate_user_via_ui_auth(
+ requester,
+ request,
+ body,
+ "issue a new access token for your account",
+ can_skip_ui_auth=False, # Don't allow skipping of UI auth
+ )
+
+ login_token = await self.auth_handler.create_login_token_for_user_id(
+ user_id=requester.user.to_string(),
+ auth_provider_id="org.matrix.msc3882.login_token_request",
+ duration_ms=self.token_timeout,
+ )
+
+ return (
+ 200,
+ {
+ "login_token": login_token,
+ "expires_in": self.token_timeout // 1000,
+ },
+ )
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3882_enabled:
+ LoginTokenRequestServlet(hs).register(http_server)
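As a rough, hypothetical illustration of how a client might drive the new MSC3882 endpoint (the homeserver URL and access token are placeholders; if msc3882_ui_auth is left enabled, the first request would instead return a 401 carrying user-interactive auth flows):

import requests

HOMESERVER = "https://matrix.example.org"  # placeholder
ACCESS_TOKEN = "syt_token_of_existing_device"  # placeholder

# 1. Ask the already-logged-in device for a short-lived login token.
resp = requests.post(
    f"{HOMESERVER}/_matrix/client/unstable/org.matrix.msc3882/login/token",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={},
    timeout=10,
)
resp.raise_for_status()
login_token = resp.json()["login_token"]

# 2. Log the new device in with m.login.token, as in the servlet docstring.
resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/login",
    json={"type": "m.login.token", "token": login_token},
    timeout=10,
)
print(resp.json())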
diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py
index 23dfa4518f..6d34625ad5 100644
--- a/synapse/rest/client/logout.py
+++ b/synapse/rest/client/logout.py
@@ -15,6 +15,7 @@
import logging
from typing import TYPE_CHECKING, Tuple
+from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
@@ -34,7 +35,9 @@ class LogoutRestServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
- self._device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self._device_handler = handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
@@ -59,7 +62,9 @@ class LogoutAllRestServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self._auth_handler = hs.get_auth_handler()
- self._device_handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self._device_handler = handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_expired=True)
diff --git a/synapse/rest/client/models.py b/synapse/rest/client/models.py
new file mode 100644
index 0000000000..3d7940b0fc
--- /dev/null
+++ b/synapse/rest/client/models.py
@@ -0,0 +1,87 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING, Dict, Optional
+
+from pydantic import Extra, StrictInt, StrictStr, constr, validator
+
+from synapse.rest.models import RequestBodyModel
+from synapse.util.threepids import validate_email
+
+
+class AuthenticationData(RequestBodyModel):
+ """
+ Data used during user-interactive authentication.
+
+ (The name "Authentication Data" is taken directly from the spec.)
+
+ Additional keys will be present, depending on the `type` field. Use
+ `.dict(exclude_unset=True)` to access them.
+ """
+
+ class Config:
+ extra = Extra.allow
+
+ session: Optional[StrictStr] = None
+ type: Optional[StrictStr] = None
+
+
+if TYPE_CHECKING:
+ ClientSecretStr = StrictStr
+else:
+ # See also assert_valid_client_secret()
+ ClientSecretStr = constr(
+ regex="[0-9a-zA-Z.=_-]", # noqa: F722
+ min_length=1,
+ max_length=255,
+ strict=True,
+ )
+
+
+class ThreepidRequestTokenBody(RequestBodyModel):
+ client_secret: ClientSecretStr
+ id_server: Optional[StrictStr]
+ id_access_token: Optional[StrictStr]
+ next_link: Optional[StrictStr]
+ send_attempt: StrictInt
+
+ @validator("id_access_token", always=True)
+ def token_required_for_identity_server(
+ cls, token: Optional[str], values: Dict[str, object]
+ ) -> Optional[str]:
+ if values.get("id_server") is not None and token is None:
+ raise ValueError("id_access_token is required if an id_server is supplied.")
+ return token
+
+
+class EmailRequestTokenBody(ThreepidRequestTokenBody):
+ email: StrictStr
+
+ # Canonicalise the email address. The addresses are all stored canonicalised
+ # in the database. This allows the user to reset their password without having
+ # to know the exact spelling (e.g. upper and lower case) of the address in the
+ # database. Without this, an email stored in the database as "foo@bar.com"
+ # would cause user requests for "FOO@bar.com" to raise a Not Found error.
+ _email_validator = validator("email", allow_reuse=True)(validate_email)
+
+
+if TYPE_CHECKING:
+ ISO3116_1_Alpha_2 = StrictStr
+else:
+ # Per spec: two-letter uppercase ISO-3166-1-alpha-2
+ ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)
+
+
+class MsisdnRequestTokenBody(ThreepidRequestTokenBody):
+ country: ISO3116_1_Alpha_2
+ phone_number: StrictStr
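A standalone sketch of how the request-body models above validate input, assuming pydantic v1 is installed (RequestBodyModel is approximated with a plain BaseModel, and synapse.util.threepids.validate_email is reduced to lower-casing for the demo):

from typing import Dict, Optional

from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, validator


def lowercase_email(address: str) -> str:
    # Stand-in for synapse.util.threepids.validate_email (canonicalisation only).
    return address.lower()


class ThreepidRequestTokenBody(BaseModel):
    client_secret: StrictStr
    id_server: Optional[StrictStr]
    id_access_token: Optional[StrictStr]
    next_link: Optional[StrictStr]
    send_attempt: StrictInt

    @validator("id_access_token", always=True)
    def token_required_for_identity_server(
        cls, token: Optional[str], values: Dict[str, object]
    ) -> Optional[str]:
        if values.get("id_server") is not None and token is None:
            raise ValueError("id_access_token is required if an id_server is supplied.")
        return token


class EmailRequestTokenBody(ThreepidRequestTokenBody):
    email: StrictStr

    _email_validator = validator("email", allow_reuse=True)(lowercase_email)


body = EmailRequestTokenBody.parse_obj(
    {"client_secret": "abc123", "email": "FOO@bar.com", "send_attempt": 1}
)
print(body.email)  # foo@bar.com

try:
    EmailRequestTokenBody.parse_obj(
        {
            "client_secret": "abc123",
            "email": "foo@bar.com",
            "send_attempt": 1,
            "id_server": "id.example.org",  # no id_access_token, so this is rejected
        }
    )
except ValidationError as exc:
    print(exc.errors()[0]["msg"])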
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index 24bc7c9095..61268e3af1 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -58,7 +58,11 @@ class NotificationsServlet(RestServlet):
)
receipts_by_room = await self.store.get_receipts_for_user_with_orderings(
- user_id, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
+ user_id,
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ],
)
notif_event_ids = [pa.event_id for pa in push_actions]
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index c16d707909..e69fa0829d 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -66,7 +66,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user = UserID.from_string(user_id)
- is_admin = await self.auth.is_server_admin(requester.user)
+ is_admin = await self.auth.is_server_admin(requester)
content = parse_json_object_from_request(request)
@@ -123,7 +123,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
- is_admin = await self.auth.is_server_admin(requester.user)
+ is_admin = await self.auth.is_server_admin(requester)
content = parse_json_object_from_request(request)
try:
diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py
index 9a1f10f4be..975eef2144 100644
--- a/synapse/rest/client/pusher.py
+++ b/synapse/rest/client/pusher.py
@@ -42,6 +42,7 @@ class PushersRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
+ self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -51,9 +52,16 @@ class PushersRestServlet(RestServlet):
user.to_string()
)
- filtered_pushers = [p.as_dict() for p in pushers]
+ pusher_dicts = [p.as_dict() for p in pushers]
- return 200, {"pushers": filtered_pushers}
+ for pusher in pusher_dicts:
+ if self._msc3881_enabled:
+ pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
+ pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
+ del pusher["enabled"]
+ del pusher["device_id"]
+
+ return 200, {"pushers": pusher_dicts}
class PushersSetRestServlet(RestServlet):
@@ -65,6 +73,7 @@ class PushersSetRestServlet(RestServlet):
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
self.pusher_pool = self.hs.get_pusherpool()
+ self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -103,6 +112,10 @@ class PushersSetRestServlet(RestServlet):
if "append" in content:
append = content["append"]
+ enabled = True
+ if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content:
+ enabled = content["org.matrix.msc3881.enabled"]
+
if not append:
await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
app_id=content["app_id"],
@@ -111,7 +124,7 @@ class PushersSetRestServlet(RestServlet):
)
try:
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user.to_string(),
access_token=requester.access_token_id,
kind=content["kind"],
@@ -122,6 +135,8 @@ class PushersSetRestServlet(RestServlet):
lang=content["lang"],
data=content["data"],
profile_tag=content.get("profile_tag", ""),
+ enabled=enabled,
+ device_id=requester.device_id,
)
except PusherConfigException as pce:
raise SynapseError(
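On the write side, PushersSetRestServlet above now reads an unstable org.matrix.msc3881.enabled flag and records the requester's device ID against the pusher. A hypothetical request body for POST /_matrix/client/v3/pushers/set with MSC3881 enabled might look like this (all values are placeholders):

# Hypothetical body for POST /_matrix/client/v3/pushers/set; everything except
# the org.matrix.msc3881.enabled flag is the existing spec'd pusher definition.
set_pusher_body = {
    "app_id": "com.example.app",
    "pushkey": "abcdef1234",
    "kind": "http",
    "app_display_name": "Example App",
    "device_display_name": "Alice's phone",
    "lang": "en",
    "data": {"url": "https://push.example.org/_matrix/push/v1/notify"},
    "profile_tag": "",
    # New in this change: lets a client create or update a pusher in a
    # disabled state; the server also stores the requester's device_id.
    "org.matrix.msc3881.enabled": False,
}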
diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py
index 8896f2df50..852838515c 100644
--- a/synapse/rest/client/read_marker.py
+++ b/synapse/rest/client/read_marker.py
@@ -40,9 +40,11 @@ class ReadMarkerRestServlet(RestServlet):
self.read_marker_handler = hs.get_read_marker_handler()
self.presence_handler = hs.get_presence_handler()
- self._known_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ}
- if hs.config.experimental.msc2285_enabled:
- self._known_receipt_types.add(ReceiptTypes.READ_PRIVATE)
+ self._known_receipt_types = {
+ ReceiptTypes.READ,
+ ReceiptTypes.FULLY_READ,
+ ReceiptTypes.READ_PRIVATE,
+ }
async def on_POST(
self, request: SynapseRequest, room_id: str
@@ -81,6 +83,8 @@ class ReadMarkerRestServlet(RestServlet):
receipt_type,
user_id=requester.user.to_string(),
event_id=event_id,
+ # Setting the thread ID is not possible with the /read_markers endpoint.
+ thread_id=None,
)
return 200, {}
diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index 409bfd43c1..18a282b22c 100644
--- a/synapse/rest/client/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -15,8 +15,8 @@
import logging
from typing import TYPE_CHECKING, Tuple
-from synapse.api.constants import ReceiptTypes
-from synapse.api.errors import SynapseError
+from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes
+from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@@ -43,12 +43,13 @@ class ReceiptRestServlet(RestServlet):
self.receipts_handler = hs.get_receipts_handler()
self.read_marker_handler = hs.get_read_marker_handler()
self.presence_handler = hs.get_presence_handler()
+ self._main_store = hs.get_datastores().main
- self._known_receipt_types = {ReceiptTypes.READ}
- if hs.config.experimental.msc2285_enabled:
- self._known_receipt_types.update(
- (ReceiptTypes.READ_PRIVATE, ReceiptTypes.FULLY_READ)
- )
+ self._known_receipt_types = {
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ReceiptTypes.FULLY_READ,
+ }
async def on_POST(
self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str
@@ -61,7 +62,33 @@ class ReceiptRestServlet(RestServlet):
f"Receipt type must be {', '.join(self._known_receipt_types)}",
)
- parse_json_object_from_request(request, allow_empty_body=False)
+ body = parse_json_object_from_request(request)
+
+ # Pull the thread ID, if one exists.
+ thread_id = None
+ if "thread_id" in body:
+ thread_id = body.get("thread_id")
+ if not thread_id or not isinstance(thread_id, str):
+ raise SynapseError(
+ 400,
+ "thread_id field must be a non-empty string",
+ Codes.INVALID_PARAM,
+ )
+
+ if receipt_type == ReceiptTypes.FULLY_READ:
+ raise SynapseError(
+ 400,
+ f"thread_id is not compatible with {ReceiptTypes.FULLY_READ} receipts.",
+ Codes.INVALID_PARAM,
+ )
+
+ # Ensure the event ID roughly correlates to the thread ID.
+ if not await self._is_event_in_thread(event_id, thread_id):
+ raise SynapseError(
+ 400,
+ f"event_id {event_id} is not related to thread {thread_id}",
+ Codes.INVALID_PARAM,
+ )
await self.presence_handler.bump_presence_active_time(requester.user)
@@ -77,10 +104,51 @@ class ReceiptRestServlet(RestServlet):
receipt_type,
user_id=requester.user.to_string(),
event_id=event_id,
+ thread_id=thread_id,
)
return 200, {}
+ async def _is_event_in_thread(self, event_id: str, thread_id: str) -> bool:
+ """
+ The event must be related to the thread ID (in a vague sense) to ensure
+ clients aren't sending bogus receipts.
+
+ A thread ID is considered valid for a given event E if:
+
+ 1. E has a thread relation which matches the thread ID;
+ 2. E has another event which has a thread relation to E matching the
+ thread ID; or
+ 3. E is recursively related (via any rel_type) to an event which
+ satisfies 1 or 2.
+
+ Given the following DAG:
+
+ A <---[m.thread]-- B <--[m.annotation]-- C
+ ^
+ |--[m.reference]-- D <--[m.annotation]-- E
+
+ It is valid to send a receipt for thread A on A, B, C, D, or E.
+
+ It is valid to send a receipt for the main timeline on A, D, and E.
+
+ Args:
+ event_id: The event ID to check.
+ thread_id: The thread ID the event is potentially part of.
+
+ Returns:
+ True if the event belongs to the given thread, otherwise False.
+ """
+
+ # If the receipt is on the main timeline, it is enough to check whether
+ # the event is directly related to a thread.
+ if thread_id == MAIN_TIMELINE:
+ return MAIN_TIMELINE == await self._main_store.get_thread_id(event_id)
+
+ # Otherwise, check if the event is directly part of a thread, or is the
+ # root message (or related to the root message) of a thread.
+ return thread_id == await self._main_store.get_thread_id_for_receipts(event_id)
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReceiptRestServlet(hs).register(http_server)
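Tying the receipt validation above to the DAG in the _is_event_in_thread docstring, hypothetical request bodies (room and event IDs are placeholders; "main" is the MAIN_TIMELINE sentinel from MSC3771) behave as follows:

# Hypothetical receipts for the DAG shown in _is_event_in_thread:
#   A <---[m.thread]-- B <--[m.annotation]-- C
#   ^
#   |--[m.reference]-- D <--[m.annotation]-- E

# Accepted: C is transitively part of thread A.
receipt_on_thread = {
    "path": "/_matrix/client/v3/rooms/!room:example.org/receipt/m.read/$C",
    "body": {"thread_id": "$A"},
}

# Accepted: D only relates to A via m.reference, so it counts as main timeline.
receipt_on_main_timeline = {
    "path": "/_matrix/client/v3/rooms/!room:example.org/receipt/m.read/$D",
    "body": {"thread_id": "main"},
}

# Rejected with 400 / M_INVALID_PARAM: thread_id is not allowed on m.fully_read.
rejected_receipt = {
    "path": "/_matrix/client/v3/rooms/!room:example.org/receipt/m.fully_read/$C",
    "body": {"thread_id": "$A"},
}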
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 956c45e60a..de810ae3ec 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -21,17 +21,21 @@ from twisted.web.server import Request
import synapse
import synapse.api.auth
import synapse.types
-from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType
+from synapse.api.constants import (
+ APP_SERVICE_REGISTRATION_TYPE,
+ ApprovalNoticeMedium,
+ LoginType,
+)
from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
+ NotApprovedError,
SynapseError,
ThreepidValidationError,
UnrecognizedRequestError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.config import ConfigError
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.homeserver import HomeServerConfig
from synapse.config.ratelimiting import FederationRatelimitSettings
from synapse.config.server import is_threepid_reserved
@@ -74,7 +78,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.config = hs.config
- if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.hs.config.email.can_verify_email:
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email.email_app_name,
@@ -83,13 +87,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if (
- self.hs.config.email.local_threepid_handling_disabled_due_to_email_config
- ):
- logger.warning(
- "Email registration has been disabled due to lack of email config"
- )
+ if not self.hs.config.email.can_verify_email:
+ logger.warning(
+ "Email registration has been disabled due to lack of email config"
+ )
raise SynapseError(
400, "Email-based registration has been disabled on this server"
)
@@ -138,35 +139,21 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
- assert self.hs.config.registration.account_threepid_delegate_email
-
- # Have the configured identity server handle the request
- ret = await self.identity_handler.request_email_token(
- self.hs.config.registration.account_threepid_delegate_email,
- email,
- client_secret,
- send_attempt,
- next_link,
- )
- else:
- # Send registration emails from Synapse,
- # wrapping the session id in a JSON object.
- ret = {
- "sid": await self.identity_handler.send_threepid_validation(
- email,
- client_secret,
- send_attempt,
- self.mailer.send_registration_mail,
- next_link,
- )
- }
+ # Send registration emails from Synapse
+ sid = await self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_registration_mail,
+ next_link,
+ )
threepid_send_requests.labels(type="email", reason="register").observe(
send_attempt
)
- return 200, ret
+ # Wrap the session id in a JSON object
+ return 200, {"sid": sid}
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
@@ -260,7 +247,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ if self.config.email.can_verify_email:
self._failure_email_template = (
self.config.email.email_registration_template_failure_html
)
@@ -270,11 +257,10 @@ class RegistrationSubmitTokenServlet(RestServlet):
raise SynapseError(
400, "This medium is currently not supported for registration"
)
- if self.config.email.threepid_behaviour_email == ThreepidBehaviour.OFF:
- if self.config.email.local_threepid_handling_disabled_due_to_email_config:
- logger.warning(
- "User registration via email has been disabled due to lack of email config"
- )
+ if not self.config.email.can_verify_email:
+ logger.warning(
+ "User registration via email has been disabled due to lack of email config"
+ )
raise SynapseError(
400, "Email-based registration is disabled on this server"
)
@@ -433,6 +419,11 @@ class RegisterRestServlet(RestServlet):
hs.config.registration.inhibit_user_in_use_error
)
+ self._require_approval = (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ )
+
self._registration_flows = _calculate_registration_flows(
hs.config, self.auth_handler
)
@@ -484,9 +475,6 @@ class RegisterRestServlet(RestServlet):
"Appservice token must be provided when using a type of m.login.application_service",
)
- # Verify the AS
- self.auth.get_appservice_by_req(request)
-
# Set the desired user according to the AS API (which uses the
# 'user' key not 'username'). Since this is a new addition, we'll
# fallback to 'username' if they gave one.
@@ -756,6 +744,12 @@ class RegisterRestServlet(RestServlet):
access_token=return_dict.get("access_token"),
)
+ if self._require_approval:
+ raise NotApprovedError(
+ msg="This account needs to be approved by an administrator before it can be used.",
+ approval_notice_medium=ApprovalNoticeMedium.NONE,
+ )
+
return 200, return_dict
async def _do_appservice_registration(
@@ -800,7 +794,9 @@ class RegisterRestServlet(RestServlet):
"user_id": user_id,
"home_server": self.hs.hostname,
}
- if not params.get("inhibit_login", False):
+ # We don't want to log the user in if we're going to deny them access because
+ # they need to be approved first.
+ if not params.get("inhibit_login", False) and not self._require_approval:
device_id = params.get("device_id")
initial_display_name = params.get("initial_device_display_name")
(
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index ce97080013..9dd59196d9 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -13,13 +13,17 @@
# limitations under the License.
import logging
+import re
from typing import TYPE_CHECKING, Optional, Tuple
+from synapse.handlers.relations import ThreadsListInclude
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
-from synapse.types import JsonDict, StreamToken
+from synapse.storage.databases.main.relations import ThreadsNextBatch
+from synapse.streams.config import PaginationConfig
+from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -41,9 +45,8 @@ class RelationPaginationServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
+ self._store = hs.get_datastores().main
self._relations_handler = hs.get_relations_handler()
- self._msc3715_enabled = hs.config.experimental.msc3715_enabled
async def on_GET(
self,
@@ -55,37 +58,63 @@ class RelationPaginationServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ pagination_config = await PaginationConfig.from_request(
+ self._store, request, default_limit=5, default_dir="b"
+ )
+
+ # The unstable version of this API returns an extra field for client
+ # compatibility, see https://github.com/matrix-org/synapse/issues/12930.
+ assert request.path is not None
+ include_original_event = request.path.startswith(b"/_matrix/client/unstable/")
+
+ # Return the relations
+ result = await self._relations_handler.get_relations(
+ requester=requester,
+ event_id=parent_id,
+ room_id=room_id,
+ pagin_config=pagination_config,
+ include_original_event=include_original_event,
+ relation_type=relation_type,
+ event_type=event_type,
+ )
+
+ return 200, result
+
+
+class ThreadsServlet(RestServlet):
+ PATTERNS = (re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/threads"),)
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self._relations_handler = hs.get_relations_handler()
+
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+
limit = parse_integer(request, "limit", default=5)
- if self._msc3715_enabled:
- direction = parse_string(
- request,
- "org.matrix.msc3715.dir",
- default="b",
- allowed_values=["f", "b"],
- )
- else:
- direction = "b"
from_token_str = parse_string(request, "from")
- to_token_str = parse_string(request, "to")
+ include = parse_string(
+ request,
+ "include",
+ default=ThreadsListInclude.all.value,
+ allowed_values=[v.value for v in ThreadsListInclude],
+ )
# Return the relations
from_token = None
if from_token_str:
- from_token = await StreamToken.from_string(self.store, from_token_str)
- to_token = None
- if to_token_str:
- to_token = await StreamToken.from_string(self.store, to_token_str)
+ from_token = ThreadsNextBatch.from_string(from_token_str)
- result = await self._relations_handler.get_relations(
+ result = await self._relations_handler.get_threads(
requester=requester,
- event_id=parent_id,
room_id=room_id,
- relation_type=relation_type,
- event_type=event_type,
+ include=ThreadsListInclude(include),
limit=limit,
- direction=direction,
from_token=from_token,
- to_token=to_token,
)
return 200, result
@@ -93,3 +122,4 @@ class RelationPaginationServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RelationPaginationServlet(hs).register(http_server)
+ ThreadsServlet(hs).register(http_server)
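A hedged sketch of paging through the new threads listing (homeserver URL, token and room ID are placeholders; the `include` values come from ThreadsListInclude, and the `chunk`/`next_batch` response shape is the MSC3856 one assumed here):

import requests

HOMESERVER = "https://matrix.example.org"  # placeholder
ACCESS_TOKEN = "syt_example"  # placeholder
ROOM_ID = "!room:example.org"  # placeholder

params = {"include": "participated", "limit": 5}
while True:
    resp = requests.get(
        f"{HOMESERVER}/_matrix/client/v1/rooms/{ROOM_ID}/threads",
        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        params=params,
        timeout=10,
    )
    resp.raise_for_status()
    page = resp.json()
    for thread_root in page.get("chunk", []):
        print(thread_root["event_id"])
    next_batch = page.get("next_batch")
    if not next_batch:
        break
    params["from"] = next_batch  # opaque ThreadsNextBatch token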
diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
new file mode 100644
index 0000000000..89176b1ffa
--- /dev/null
+++ b/synapse/rest/client/rendezvous.py
@@ -0,0 +1,74 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from http.client import TEMPORARY_REDIRECT
+from typing import TYPE_CHECKING, Optional
+
+from synapse.http.server import HttpServer, respond_with_redirect
+from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class RendezvousServlet(RestServlet):
+ """
+ This is a placeholder implementation of the [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886)
+ simple client rendezvous capability that is used by the "Sign in with QR" functionality.
+
+ This implementation only serves as a 307 redirect to a configured server rather than being a full implementation.
+
+ A module that implements the full functionality is available at: https://pypi.org/project/matrix-http-rendezvous-synapse/.
+
+ Request:
+
+ POST /rendezvous HTTP/1.1
+ Content-Type: ...
+
+ ...
+
+ Response:
+
+ HTTP/1.1 307
+ Location: <configured endpoint>
+ """
+
+ PATTERNS = client_patterns(
+ "/org.matrix.msc3886/rendezvous$", releases=[], v1=False, unstable=True
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ redirection_target: Optional[str] = hs.config.experimental.msc3886_endpoint
+ assert (
+ redirection_target is not None
+ ), "Servlet is only registered if there is a redirection target"
+ self.endpoint = redirection_target.encode("utf-8")
+
+ async def on_POST(self, request: SynapseRequest) -> None:
+ respond_with_redirect(
+ request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True
+ )
+
+ # PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target.
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3886_endpoint is not None:
+ RendezvousServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 2f513164cb..91cb791139 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -16,9 +16,13 @@
""" This module contains REST servlets to do with rooms: /rooms/<paths> """
import logging
import re
+from enum import Enum
+from http import HTTPStatus
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from urllib import parse as urlparse
+from prometheus_client.core import Histogram
+
from twisted.web.server import Request
from synapse import event_auth
@@ -34,7 +38,7 @@ from synapse.api.errors import (
)
from synapse.api.filtering import Filter
from synapse.events.utils import format_event_for_client_v2
-from synapse.http.server import HttpServer, cancellable
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
@@ -46,13 +50,16 @@ from synapse.http.servlet import (
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
from synapse.util import json_decoder
+from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name, random_string
if TYPE_CHECKING:
@@ -61,6 +68,70 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+class _RoomSize(Enum):
+ """
+ Enum to differentiate sizes of rooms. This is a pretty good approximation
+ of how hard it will be to get events in the room. We could also look at
+ room "complexity".
+ """
+
+ # This doesn't necessarily mean the room is a DM, just that there is a DM
+ # amount of people there.
+ DM_SIZE = "direct_message_size"
+ SMALL = "small"
+ SUBSTANTIAL = "substantial"
+ LARGE = "large"
+
+ @staticmethod
+ def from_member_count(member_count: int) -> "_RoomSize":
+ if member_count <= 2:
+ return _RoomSize.DM_SIZE
+ elif member_count < 100:
+ return _RoomSize.SMALL
+ elif member_count < 1000:
+ return _RoomSize.SUBSTANTIAL
+ else:
+ return _RoomSize.LARGE
+
+
+# This is an extra metric on top of `synapse_http_server_response_time_seconds`
+# which times the same sort of thing but this one allows us to see values
+# greater than 10s. We use a separate dedicated histogram with its own buckets
+# so that we don't increase the cardinality of the general one because it's
+# multiplied across hundreds of servlets.
+messsages_response_timer = Histogram(
+ "synapse_room_message_list_rest_servlet_response_time_seconds",
+ "sec",
+ # We have a label for room size so we can try to see a more realistic
+ # picture of /messages response time for bigger rooms. We don't want the
+ # tiny rooms that can always respond fast skewing our results when we're trying
+ # to optimize the bigger cases.
+ ["room_size"],
+ buckets=(
+ 0.005,
+ 0.01,
+ 0.025,
+ 0.05,
+ 0.1,
+ 0.25,
+ 0.5,
+ 1.0,
+ 2.5,
+ 5.0,
+ 10.0,
+ 20.0,
+ 30.0,
+ 60.0,
+ 80.0,
+ 100.0,
+ 120.0,
+ 150.0,
+ 180.0,
+ "+Inf",
+ ),
+)
+
+
class TransactionRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
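The _RoomSize bucketing and the dedicated /messages histogram above combine roughly as in this self-contained sketch (assumes prometheus_client is installed; the thresholds mirror from_member_count, and the buckets are abbreviated):

import time
from enum import Enum

from prometheus_client import Histogram


class RoomSize(Enum):
    DM_SIZE = "direct_message_size"
    SMALL = "small"
    SUBSTANTIAL = "substantial"
    LARGE = "large"

    @staticmethod
    def from_member_count(member_count: int) -> "RoomSize":
        if member_count <= 2:
            return RoomSize.DM_SIZE
        elif member_count < 100:
            return RoomSize.SMALL
        elif member_count < 1000:
            return RoomSize.SUBSTANTIAL
        return RoomSize.LARGE


response_timer = Histogram(
    "example_messages_response_time_seconds",
    "sec",
    ["room_size"],
    buckets=(0.1, 1.0, 10.0, 60.0, float("inf")),
)

start = time.monotonic()
time.sleep(0.05)  # stand-in for actually serving /messages
response_timer.labels(
    room_size=RoomSize.from_member_count(523).value  # "substantial"
).observe(time.monotonic() - start)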
@@ -165,7 +236,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
msg_handler = self.message_handler
data = await msg_handler.get_room_data(
- user_id=requester.user.to_string(),
+ requester=requester,
room_id=room_id,
event_type=event_type,
state_key=state_key,
@@ -198,15 +269,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):
content = parse_json_object_from_request(request)
- event_dict = {
- "type": event_type,
- "content": content,
- "room_id": room_id,
- "sender": requester.user.to_string(),
- }
-
- if state_key is not None:
- event_dict["state_key"] = state_key
+ origin_server_ts = None
+ if requester.app_service:
+ origin_server_ts = parse_integer(request, "ts")
try:
if event_type == EventTypes.Member:
@@ -217,8 +282,22 @@ class RoomStateEventRestServlet(TransactionRestServlet):
room_id=room_id,
action=membership,
content=content,
+ origin_server_ts=origin_server_ts,
)
else:
+ event_dict: JsonDict = {
+ "type": event_type,
+ "content": content,
+ "room_id": room_id,
+ "sender": requester.user.to_string(),
+ }
+
+ if state_key is not None:
+ event_dict["state_key"] = state_key
+
+ if origin_server_ts is not None:
+ event_dict["origin_server_ts"] = origin_server_ts
+
(
event,
_,
@@ -263,10 +342,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
"sender": requester.user.to_string(),
}
- # Twisted will have processed the args by now.
- assert request.args is not None
- if b"ts" in request.args and requester.app_service:
- event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
+ if requester.app_service:
+ origin_server_ts = parse_integer(request, "ts")
+ if origin_server_ts is not None:
+ event_dict["origin_server_ts"] = origin_server_ts
try:
(
@@ -510,7 +589,7 @@ class RoomMemberListRestServlet(RestServlet):
events = await handler.get_state_events(
room_id=room_id,
- user_id=requester.user.to_string(),
+ requester=requester,
at_token=at_token,
state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
)
@@ -556,6 +635,7 @@ class RoomMessageListRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self._hs = hs
+ self.clock = hs.get_clock()
self.pagination_handler = hs.get_pagination_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
@@ -563,6 +643,18 @@ class RoomMessageListRestServlet(RestServlet):
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
+ processing_start_time = self.clock.time_msec()
+ # Fire off and hope that we get a result by the end.
+ #
+ # We're using the mypy type ignore comment because the `@cached`
+ # decorator on `get_number_joined_users_in_room` doesn't play well with
+ # the type system. Maybe in the future, it can use some ParamSpec
+ # wizardry to fix it up.
+ room_member_count_deferred = run_in_background( # type: ignore[call-arg]
+ self.store.get_number_joined_users_in_room,
+ room_id, # type: ignore[arg-type]
+ )
+
requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = await PaginationConfig.from_request(
self.store, request, default_limit=10
@@ -593,6 +685,12 @@ class RoomMessageListRestServlet(RestServlet):
event_filter=event_filter,
)
+ processing_end_time = self.clock.time_msec()
+ room_member_count = await make_deferred_yieldable(room_member_count_deferred)
+ messsages_response_timer.labels(
+ room_size=_RoomSize.from_member_count(room_member_count)
+ ).observe((processing_end_time - processing_start_time) / 1000)
+
return 200, msgs
@@ -613,8 +711,7 @@ class RoomStateRestServlet(RestServlet):
# Get all the current state for this room
events = await self.message_handler.get_state_events(
room_id=room_id,
- user_id=requester.user.to_string(),
- is_guest=requester.is_guest,
+ requester=requester,
)
return 200, events
@@ -633,7 +730,9 @@ class RoomInitialSyncRestServlet(RestServlet):
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
- pagination_config = await PaginationConfig.from_request(self.store, request)
+ pagination_config = await PaginationConfig.from_request(
+ self.store, request, default_limit=10
+ )
content = await self.initial_sync_handler.room_initial_sync(
room_id=room_id, requester=requester, pagin_config=pagination_config
)
@@ -672,7 +771,7 @@ class RoomEventServlet(RestServlet):
== "true"
)
if include_unredacted_content and not await self.auth.is_server_admin(
- requester.user
+ requester
):
power_level_event = (
await self._storage_controllers.state.get_current_state_event(
@@ -860,7 +959,16 @@ class RoomMembershipRestServlet(TransactionRestServlet):
# cheekily send invalid bodies.
content = {}
- if membership_action == "invite" and self._has_3pid_invite_keys(content):
+ if membership_action == "invite" and all(
+ key in content for key in ("medium", "address")
+ ):
+ if not all(key in content for key in ("id_server", "id_access_token")):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "`id_server` and `id_access_token` are required when doing 3pid invite",
+ Codes.MISSING_PARAM,
+ )
+
try:
await self.room_member_handler.do_3pid_invite(
room_id,
@@ -870,7 +978,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content["id_server"],
requester,
txn_id,
- content.get("id_access_token"),
+ content["id_access_token"],
)
except ShadowBanError:
# Pretend the request succeeded.
@@ -907,12 +1015,6 @@ class RoomMembershipRestServlet(TransactionRestServlet):
return 200, return_value
- def _has_3pid_invite_keys(self, content: JsonDict) -> bool:
- for key in {"id_server", "medium", "address"}:
- if key not in content:
- return False
- return True
-
def on_PUT(
self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
) -> Awaitable[Tuple[int, JsonDict]]:
@@ -928,6 +1030,8 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
+ self._relation_handler = hs.get_relations_handler()
+ self._msc3912_enabled = hs.config.experimental.msc3912_enabled
def register(self, http_server: HttpServer) -> None:
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
@@ -944,20 +1048,46 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
content = parse_json_object_from_request(request)
try:
- (
- event,
- _,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- requester,
- {
- "type": EventTypes.Redaction,
- "content": content,
- "room_id": room_id,
- "sender": requester.user.to_string(),
- "redacts": event_id,
- },
- txn_id=txn_id,
- )
+ with_relations = None
+ if self._msc3912_enabled and "org.matrix.msc3912.with_relations" in content:
+ with_relations = content["org.matrix.msc3912.with_relations"]
+ del content["org.matrix.msc3912.with_relations"]
+
+ # Check if there's an existing event for this transaction now (even though
+ # create_and_send_nonmember_event also does it) because, if there's one,
+ # then we want to skip the call to redact_events_related_to.
+ event = None
+ if txn_id:
+ event = await self.event_creation_handler.get_event_from_transaction(
+ requester, txn_id, room_id
+ )
+
+ if event is None:
+ (
+ event,
+ _,
+ ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.Redaction,
+ "content": content,
+ "room_id": room_id,
+ "sender": requester.user.to_string(),
+ "redacts": event_id,
+ },
+ txn_id=txn_id,
+ )
+
+ if with_relations:
+ run_as_background_process(
+ "redact_related_events",
+ self._relation_handler.redact_events_related_to,
+ requester=requester,
+ event_id=event_id,
+ initial_redaction_event=event,
+ relation_types=with_relations,
+ )
+
event_id = event.event_id
except ShadowBanError:
event_id = "$" + random_string(43)
@@ -1177,9 +1307,7 @@ class TimestampLookupRestServlet(RestServlet):
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
- await self._auth.check_user_in_room_or_world_readable(
- room_id, requester.user.to_string()
- )
+ await self._auth.check_user_in_room_or_world_readable(room_id, requester)
timestamp = parse_integer(request, "ts", required=True)
direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index dd91dabedd..10be4a781b 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -108,6 +108,13 @@ class RoomBatchSendEventRestServlet(RestServlet):
errcode=Codes.MISSING_PARAM,
)
+ if await self.store.is_partial_state_room(room_id):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Cannot insert history batches until we have fully joined the room",
+ errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
+ )
+
# Verify the batch_id_from_query corresponds to an actual insertion event
# and have the batch connected.
if batch_id_from_query:
diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index 1a8e9a96d4..46a8b03829 100644
--- a/synapse/rest/client/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -19,7 +19,7 @@ from synapse.http import servlet
from synapse.http.server import HttpServer
from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import set_tag, trace_with_opname
+from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict
@@ -43,7 +43,6 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.txns = HttpTransactionCache(hs)
self.device_message_handler = hs.get_device_message_handler()
- @trace_with_opname("sendToDevice")
def on_PUT(
self, request: SynapseRequest, message_type: str, txn_id: str
) -> Awaitable[Tuple[int, JsonDict]]:
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index c2989765ce..f2013faeb2 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -100,6 +100,7 @@ class SyncRestServlet(RestServlet):
self._server_notices_sender = hs.get_server_notices_sender()
self._event_serializer = hs.get_event_client_serializer()
self._msc2654_enabled = hs.config.experimental.msc2654_enabled
+ self._msc3773_enabled = hs.config.experimental.msc3773_enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
# This will always be set by the time Twisted calls us.
@@ -145,12 +146,12 @@ class SyncRestServlet(RestServlet):
elif filter_id.startswith("{"):
try:
filter_object = json_decoder.decode(filter_id)
- set_timeline_upper_limit(
- filter_object, self.hs.config.server.filter_timeline_limit
- )
except Exception:
- raise SynapseError(400, "Invalid filter JSON")
+ raise SynapseError(400, "Invalid filter JSON", errcode=Codes.NOT_JSON)
self.filtering.check_valid_filter(filter_object)
+ set_timeline_upper_limit(
+ filter_object, self.hs.config.server.filter_timeline_limit
+ )
filter_collection = FilterCollection(self.hs, filter_object)
else:
try:
@@ -509,6 +510,12 @@ class SyncRestServlet(RestServlet):
ephemeral_events = room.ephemeral
result["ephemeral"] = {"events": ephemeral_events}
result["unread_notifications"] = room.unread_notifications
+ if room.unread_thread_notifications:
+ result["unread_thread_notifications"] = room.unread_thread_notifications
+ if self._msc3773_enabled:
+ result[
+ "org.matrix.msc3773.unread_thread_notifications"
+ ] = room.unread_thread_notifications
result["summary"] = room.summary
if self._msc2654_enabled:
result["org.matrix.msc2654.unread_count"] = room.unread_count
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 0366986755..180a11ef88 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -75,6 +75,8 @@ class VersionsRestServlet(RestServlet):
"r0.6.1",
"v1.1",
"v1.2",
+ "v1.3",
+ "v1.4",
],
# as per MSC1497:
"unstable_features": {
@@ -94,7 +96,7 @@ class VersionsRestServlet(RestServlet):
# Supports the busy presence state described in MSC3026.
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
# Supports receiving private read receipts as per MSC2285
- "org.matrix.msc2285": self.config.experimental.msc2285_enabled,
+ "org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec
# Supports filtering of /publicRooms by room type as per MSC3827
"org.matrix.msc3827.stable": True,
# Adds support for importing historical messages as per MSC2716
@@ -103,8 +105,22 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440.
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
+ # Support for thread read receipts & notification counts.
+ "org.matrix.msc3771": True,
+ "org.matrix.msc3773": self.config.experimental.msc3773_enabled,
# Allows moderators to fetch redacted event content as described in MSC2815
"fi.mau.msc2815": self.config.experimental.msc2815_enabled,
+ # Adds support for login token requests as per MSC3882
+ "org.matrix.msc3882": self.config.experimental.msc3882_enabled,
+ # Adds support for remotely enabling/disabling pushers, as per MSC3881
+ "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
+ # Adds support for filtering /messages by event relation.
+ "org.matrix.msc3874": self.config.experimental.msc3874_enabled,
+ # Adds support for simple HTTP rendezvous as per MSC3886
+ "org.matrix.msc3886": self.config.experimental.msc3886_endpoint
+ is not None,
+ # Adds support for relation-based redactions as per MSC3912.
+ "org.matrix.msc3912": self.config.experimental.msc3912_enabled,
},
},
)
diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py
index 7f8c1de1ff..26403facb8 100644
--- a/synapse/rest/key/v2/__init__.py
+++ b/synapse/rest/key/v2/__init__.py
@@ -14,17 +14,20 @@
from typing import TYPE_CHECKING
-from twisted.web.resource import Resource
-
-from .local_key_resource import LocalKey
-from .remote_key_resource import RemoteKey
+from synapse.http.server import HttpServer, JsonResource
+from synapse.rest.key.v2.local_key_resource import LocalKey
+from synapse.rest.key.v2.remote_key_resource import RemoteKey
if TYPE_CHECKING:
from synapse.server import HomeServer
-class KeyApiV2Resource(Resource):
+class KeyResource(JsonResource):
def __init__(self, hs: "HomeServer"):
- Resource.__init__(self)
- self.putChild(b"server", LocalKey(hs))
- self.putChild(b"query", RemoteKey(hs))
+ super().__init__(hs, canonical_json=True)
+ self.register_servlets(self, hs)
+
+ @staticmethod
+ def register_servlets(http_server: HttpServer, hs: "HomeServer") -> None:
+ LocalKey(hs).register(http_server)
+ RemoteKey(hs).register(http_server)
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index 0c9f042c84..d03e728d42 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -13,16 +13,15 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Optional
+import re
+from typing import TYPE_CHECKING, Optional, Tuple
-from canonicaljson import encode_canonical_json
from signedjson.sign import sign_json
from unpaddedbase64 import encode_base64
-from twisted.web.resource import Resource
from twisted.web.server import Request
-from synapse.http.server import respond_with_json_bytes
+from synapse.http.servlet import RestServlet
from synapse.types import JsonDict
if TYPE_CHECKING:
@@ -31,7 +30,7 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class LocalKey(Resource):
+class LocalKey(RestServlet):
"""HTTP resource containing encoding the TLS X.509 certificate and NACL
signature verification keys for this server::
@@ -61,18 +60,17 @@ class LocalKey(Resource):
}
"""
- isLeaf = True
+ PATTERNS = (re.compile("^/_matrix/key/v2/server(/(?P<key_id>[^/]*))?$"),)
def __init__(self, hs: "HomeServer"):
self.config = hs.config
self.clock = hs.get_clock()
self.update_response_body(self.clock.time_msec())
- Resource.__init__(self)
def update_response_body(self, time_now_msec: int) -> None:
refresh_interval = self.config.key.key_refresh_interval
self.valid_until_ts = int(time_now_msec + refresh_interval)
- self.response_body = encode_canonical_json(self.response_json_object())
+ self.response_body = self.response_json_object()
def response_json_object(self) -> JsonDict:
verify_keys = {}
@@ -99,9 +97,11 @@ class LocalKey(Resource):
json_object = sign_json(json_object, self.config.server.server_name, key)
return json_object
- def render_GET(self, request: Request) -> Optional[int]:
+ def on_GET(
+ self, request: Request, key_id: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
time_now = self.clock.time_msec()
# Update the expiry time if less than half the interval remains.
if time_now + self.config.key.key_refresh_interval / 2 > self.valid_until_ts:
self.update_response_body(time_now)
- return respond_with_json_bytes(request, 200, self.response_body)
+ return 200, self.response_body
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index f597157581..19820886f5 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,15 +13,20 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Dict, Set
+import re
+from typing import TYPE_CHECKING, Dict, Optional, Set, Tuple
from signedjson.sign import sign_json
-from synapse.api.errors import Codes, SynapseError
+from twisted.web.server import Request
+
from synapse.crypto.keyring import ServerKeyFetcher
-from synapse.http.server import DirectServeJsonResource, respond_with_json
-from synapse.http.servlet import parse_integer, parse_json_object_from_request
-from synapse.http.site import SynapseRequest
+from synapse.http.server import HttpServer
+from synapse.http.servlet import (
+ RestServlet,
+ parse_integer,
+ parse_json_object_from_request,
+)
from synapse.types import JsonDict
from synapse.util import json_decoder
from synapse.util.async_helpers import yieldable_gather_results
@@ -32,7 +37,7 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class RemoteKey(DirectServeJsonResource):
+class RemoteKey(RestServlet):
"""HTTP resource for retrieving the TLS certificate and NACL signature
verification keys for a collection of servers. Checks that the reported
X.509 TLS certificate matches the one used in the HTTPS connection. Checks
@@ -88,11 +93,7 @@ class RemoteKey(DirectServeJsonResource):
}
"""
- isLeaf = True
-
def __init__(self, hs: "HomeServer"):
- super().__init__()
-
self.fetcher = ServerKeyFetcher(hs)
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
@@ -101,47 +102,52 @@ class RemoteKey(DirectServeJsonResource):
)
self.config = hs.config
- async def _async_render_GET(self, request: SynapseRequest) -> None:
- assert request.postpath is not None
- if len(request.postpath) == 1:
- (server,) = request.postpath
- query: dict = {server.decode("ascii"): {}}
- elif len(request.postpath) == 2:
- server, key_id = request.postpath
+ def register(self, http_server: HttpServer) -> None:
+ http_server.register_paths(
+ "GET",
+ (
+ re.compile(
+ "^/_matrix/key/v2/query/(?P<server>[^/]*)(/(?P<key_id>[^/]*))?$"
+ ),
+ ),
+ self.on_GET,
+ self.__class__.__name__,
+ )
+ http_server.register_paths(
+ "POST",
+ (re.compile("^/_matrix/key/v2/query$"),),
+ self.on_POST,
+ self.__class__.__name__,
+ )
+
+ async def on_GET(
+ self, request: Request, server: str, key_id: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
+ if server and key_id:
minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
arguments = {}
if minimum_valid_until_ts is not None:
arguments["minimum_valid_until_ts"] = minimum_valid_until_ts
- query = {server.decode("ascii"): {key_id.decode("ascii"): arguments}}
+ query = {server: {key_id: arguments}}
else:
- raise SynapseError(404, "Not found %r" % request.postpath, Codes.NOT_FOUND)
+ query = {server: {}}
- await self.query_keys(request, query, query_remote_on_cache_miss=True)
+ return 200, await self.query_keys(query, query_remote_on_cache_miss=True)
- async def _async_render_POST(self, request: SynapseRequest) -> None:
+ async def on_POST(self, request: Request) -> Tuple[int, JsonDict]:
content = parse_json_object_from_request(request)
query = content["server_keys"]
- await self.query_keys(request, query, query_remote_on_cache_miss=True)
+ return 200, await self.query_keys(query, query_remote_on_cache_miss=True)
async def query_keys(
- self,
- request: SynapseRequest,
- query: JsonDict,
- query_remote_on_cache_miss: bool = False,
- ) -> None:
+ self, query: JsonDict, query_remote_on_cache_miss: bool = False
+ ) -> JsonDict:
logger.info("Handling query for keys %r", query)
store_queries = []
for server_name, key_ids in query.items():
- if (
- self.federation_domain_whitelist is not None
- and server_name not in self.federation_domain_whitelist
- ):
- logger.debug("Federation denied with %s", server_name)
- continue
-
if not key_ids:
key_ids = (None,)
for key_id in key_ids:
@@ -153,21 +159,28 @@ class RemoteKey(DirectServeJsonResource):
time_now_ms = self.clock.time_msec()
- # Note that the value is unused.
+ # Map server_name->key_id->int. Note that the value of the int is unused.
+ # XXX: why don't we just use a set?
cache_misses: Dict[str, Dict[str, int]] = {}
for (server_name, key_id, _), key_results in cached.items():
results = [(result["ts_added_ms"], result) for result in key_results]
- if not results and key_id is not None:
- cache_misses.setdefault(server_name, {})[key_id] = 0
+ if key_id is None:
+ # all keys were requested. Just return what we have without worrying
+ # about validity
+ for _, result in results:
+ # Cast to bytes since postgresql returns a memoryview.
+ json_results.add(bytes(result["key_json"]))
continue
- if key_id is not None:
+ miss = False
+ if not results:
+ miss = True
+ else:
ts_added_ms, most_recent_result = max(results)
ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
req_key = query.get(server_name, {}).get(key_id, {})
req_valid_until = req_key.get("minimum_valid_until_ts")
- miss = False
if req_valid_until is not None:
if ts_valid_until_ms < req_valid_until:
logger.debug(
@@ -211,19 +224,20 @@ class RemoteKey(DirectServeJsonResource):
ts_valid_until_ms,
time_now_ms,
)
-
- if miss:
- cache_misses.setdefault(server_name, {})[key_id] = 0
# Cast to bytes since postgresql returns a memoryview.
json_results.add(bytes(most_recent_result["key_json"]))
- else:
- for _, result in results:
- # Cast to bytes since postgresql returns a memoryview.
- json_results.add(bytes(result["key_json"]))
+
+ if miss and query_remote_on_cache_miss:
+ # only bother attempting to fetch keys from servers on our whitelist
+ if (
+ self.federation_domain_whitelist is None
+ or server_name in self.federation_domain_whitelist
+ ):
+ cache_misses.setdefault(server_name, {})[key_id] = 0
# If there is a cache miss, request the missing keys, then recurse (and
# ensure the result is sent).
- if cache_misses and query_remote_on_cache_miss:
+ if cache_misses:
await yieldable_gather_results(
lambda t: self.fetcher.get_keys(*t),
(
@@ -231,7 +245,7 @@ class RemoteKey(DirectServeJsonResource):
for server_name, keys in cache_misses.items()
),
)
- await self.query_keys(request, query, query_remote_on_cache_miss=False)
+ return await self.query_keys(query, query_remote_on_cache_miss=False)
else:
signed_keys = []
for key_json_raw in json_results:
@@ -243,6 +257,4 @@ class RemoteKey(DirectServeJsonResource):
signed_keys.append(key_json)
- response = {"server_keys": signed_keys}
-
- respond_with_json(request, 200, response, canonical_json=True)
+ return {"server_keys": signed_keys}
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index c35d42fab8..d30878f704 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -254,30 +254,32 @@ async def respond_with_responder(
file_size: Size in bytes of the media. If not known it should be None
upload_name: The name of the requested file, if any.
"""
- if request._disconnected:
- logger.warning(
- "Not sending response to request %s, already disconnected.", request
- )
- return
-
if not responder:
respond_404(request)
return
- logger.debug("Responding to media request with responder %s", responder)
- add_file_headers(request, media_type, file_size, upload_name)
- try:
- with responder:
+ # If we have a responder we *must* use it as a context manager.
+ with responder:
+ if request._disconnected:
+ logger.warning(
+ "Not sending response to request %s, already disconnected.", request
+ )
+ return
+
+ logger.debug("Responding to media request with responder %s", responder)
+ add_file_headers(request, media_type, file_size, upload_name)
+ try:
+
await responder.write_to_consumer(request)
- except Exception as e:
- # The majority of the time this will be due to the client having gone
- # away. Unfortunately, Twisted simply throws a generic exception at us
- # in that case.
- logger.warning("Failed to write to consumer: %s %s", type(e), e)
-
- # Unregister the producer, if it has one, so Twisted doesn't complain
- if request.producer:
- request.unregisterProducer()
+ except Exception as e:
+ # The majority of the time this will be due to the client having gone
+ # away. Unfortunately, Twisted simply throws a generic exception at us
+ # in that case.
+ logger.warning("Failed to write to consumer: %s %s", type(e), e)
+
+ # Unregister the producer, if it has one, so Twisted doesn't complain
+ if request.producer:
+ request.unregisterProducer()
finish_request(request)
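The key change above is that the responder is always entered as a context manager, even when we bail out early for a disconnected client, so its underlying resources are released. A small illustrative sketch (hypothetical classes) of why the early return is safe inside the `with` block:

```python
# Illustrative sketch: __exit__ runs even on an early return, so the file
# handle is always closed whether or not we streamed anything.
from types import TracebackType
from typing import Optional, Type


class FileResponder:
    def __init__(self, path: str) -> None:
        self._file = open(path, "rb")

    def __enter__(self) -> "FileResponder":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        # Always release the handle, whether we streamed the file or bailed out.
        self._file.close()


def respond(responder: FileResponder, disconnected: bool) -> None:
    with responder:
        if disconnected:
            # Early return: the context manager still closes the file.
            return
        print(responder._file.read(16))


respond(FileResponder(__file__), disconnected=True)
```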
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 7435fd9130..40b0d39eb2 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -19,6 +19,8 @@ import shutil
from io import BytesIO
from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple
+from matrix_common.types.mxc_uri import MXCUri
+
import twisted.internet.error
import twisted.web.http
from twisted.internet.defer import Deferred
@@ -64,7 +66,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-
# How often to run the background job to update the "recently accessed"
# attribute of local and remote media.
UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 # 1 minute
@@ -187,7 +188,7 @@ class MediaRepository:
content: IO,
content_length: int,
auth_user: UserID,
- ) -> str:
+ ) -> MXCUri:
"""Store uploaded content for a local user and return the mxc URL
Args:
@@ -220,7 +221,7 @@ class MediaRepository:
await self._generate_thumbnails(None, media_id, media_id, media_type)
- return "mxc://%s/%s" % (self.server_name, media_id)
+ return MXCUri(self.server_name, media_id)
async def get_local_media(
self, request: SynapseRequest, media_id: str, name: Optional[str]
@@ -343,8 +344,8 @@ class MediaRepository:
download from remote server.
Args:
- server_name (str): Remote server_name where the media originated.
- media_id (str): The media ID of the content (as defined by the
+ server_name: Remote server_name where the media originated.
+ media_id: The media ID of the content (as defined by the
remote server).
Returns:
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
index 2177b46c9e..827afd868d 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/rest/media/v1/oembed.py
@@ -139,65 +139,72 @@ class OEmbedProvider:
try:
# oEmbed responses *must* be UTF-8 according to the spec.
oembed = json_decoder.decode(raw_body.decode("utf-8"))
+ except ValueError:
+ return OEmbedResult({}, None, None)
- # The version is a required string field, but not always provided,
- # or sometimes provided as a float. Be lenient.
- oembed_version = oembed.get("version", "1.0")
- if oembed_version != "1.0" and oembed_version != 1:
- raise RuntimeError(f"Invalid oEmbed version: {oembed_version}")
+ # The version is a required string field, but not always provided,
+ # or sometimes provided as a float. Be lenient.
+ oembed_version = oembed.get("version", "1.0")
+ if oembed_version != "1.0" and oembed_version != 1:
+ return OEmbedResult({}, None, None)
- # Ensure the cache age is None or an int.
- cache_age = oembed.get("cache_age")
- if cache_age:
- cache_age = int(cache_age) * 1000
-
- # The results.
- open_graph_response = {
- "og:url": url,
- }
-
- title = oembed.get("title")
- if title:
- open_graph_response["og:title"] = title
-
- author_name = oembed.get("author_name")
+ # Attempt to parse the cache age, if possible.
+ try:
+ cache_age = int(oembed.get("cache_age")) * 1000
+ except (TypeError, ValueError):
+ # If the cache age cannot be parsed (e.g. wrong type or invalid
+ # string), ignore it.
+ cache_age = None
- # Use the provider name and as the site.
- provider_name = oembed.get("provider_name")
- if provider_name:
- open_graph_response["og:site_name"] = provider_name
+ # The oEmbed response converted to Open Graph.
+ open_graph_response: JsonDict = {"og:url": url}
- # If a thumbnail exists, use it. Note that dimensions will be calculated later.
- if "thumbnail_url" in oembed:
- open_graph_response["og:image"] = oembed["thumbnail_url"]
+ title = oembed.get("title")
+ if title and isinstance(title, str):
+ open_graph_response["og:title"] = title
- # Process each type separately.
- oembed_type = oembed["type"]
- if oembed_type == "rich":
- calc_description_and_urls(open_graph_response, oembed["html"])
-
- elif oembed_type == "photo":
- # If this is a photo, use the full image, not the thumbnail.
- open_graph_response["og:image"] = oembed["url"]
+ author_name = oembed.get("author_name")
+ if not isinstance(author_name, str):
+ author_name = None
- elif oembed_type == "video":
- open_graph_response["og:type"] = "video.other"
+ # Use the provider name as the site name.
+ provider_name = oembed.get("provider_name")
+ if provider_name and isinstance(provider_name, str):
+ open_graph_response["og:site_name"] = provider_name
+
+ # If a thumbnail exists, use it. Note that dimensions will be calculated later.
+ thumbnail_url = oembed.get("thumbnail_url")
+ if thumbnail_url and isinstance(thumbnail_url, str):
+ open_graph_response["og:image"] = thumbnail_url
+
+ # Process each type separately.
+ oembed_type = oembed.get("type")
+ if oembed_type == "rich":
+ html = oembed.get("html")
+ if isinstance(html, str):
+ calc_description_and_urls(open_graph_response, html)
+
+ elif oembed_type == "photo":
+ # If this is a photo, use the full image, not the thumbnail.
+ url = oembed.get("url")
+ if url and isinstance(url, str):
+ open_graph_response["og:image"] = url
+
+ elif oembed_type == "video":
+ open_graph_response["og:type"] = "video.other"
+ html = oembed.get("html")
+ if html and isinstance(html, str):
calc_description_and_urls(open_graph_response, oembed["html"])
- open_graph_response["og:video:width"] = oembed["width"]
- open_graph_response["og:video:height"] = oembed["height"]
-
- elif oembed_type == "link":
- open_graph_response["og:type"] = "website"
+ for size in ("width", "height"):
+ val = oembed.get(size)
+ if val is not None and isinstance(val, int):
+ open_graph_response[f"og:video:{size}"] = val
- else:
- raise RuntimeError(f"Unknown oEmbed type: {oembed_type}")
+ elif oembed_type == "link":
+ open_graph_response["og:type"] = "website"
- except Exception as e:
- # Trap any exception and let the code follow as usual.
- logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
- open_graph_response = {}
- author_name = None
- cache_age = None
+ else:
+ logger.warning("Unknown oEmbed type: %s", oembed_type)
return OEmbedResult(open_graph_response, author_name, cache_age)
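The parsing above now handles each oEmbed field defensively instead of wrapping the whole block in one `try`/`except`. A stdlib-only sketch of the same lenient style, reduced to a hypothetical helper:

```python
# Illustrative sketch: every optional field is type-checked and silently
# dropped when malformed, so one bad field cannot abort the whole preview.
import json
from typing import Optional, Tuple


def parse_oembed(raw_body: bytes, url: str) -> Tuple[dict, Optional[int]]:
    try:
        oembed = json.loads(raw_body.decode("utf-8"))
    except (ValueError, UnicodeDecodeError):
        return {}, None

    # Tolerate a missing version, or one given as a number.
    if oembed.get("version", "1.0") not in ("1.0", 1):
        return {}, None

    # Parse the cache age defensively: wrong types and bad strings become None.
    try:
        cache_age: Optional[int] = int(oembed.get("cache_age")) * 1000
    except (TypeError, ValueError):
        cache_age = None

    open_graph = {"og:url": url}
    title = oembed.get("title")
    if title and isinstance(title, str):
        open_graph["og:title"] = title
    return open_graph, cache_age


print(parse_oembed(b'{"title": "A post", "cache_age": "oops"}', "https://example.com"))
```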
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index b36c98a08e..a8f6fd6b35 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -732,10 +732,6 @@ class PreviewUrlResource(DirectServeJsonResource):
logger.debug("Running url preview cache expiry")
- if not (await self.store.db_pool.updates.has_completed_background_updates()):
- logger.debug("Still running DB updates; skipping url preview cache expiry")
- return
-
def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
"""Attempt to remove the given chain of parent directories
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 9b93b9b4f6..a48a4de92a 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -138,7 +138,7 @@ class Thumbnailer:
"""Rescales the image to the given dimensions.
Returns:
- BytesIO: the bytes of the encoded image ready to be written to disk
+ The bytes of the encoded image ready to be written to disk
"""
with self._resize(width, height) as scaled:
return self._encode_image(scaled, output_type)
@@ -155,7 +155,7 @@ class Thumbnailer:
max_height: The largest possible height.
Returns:
- BytesIO: the bytes of the encoded image ready to be written to disk
+ The bytes of the encoded image ready to be written to disk
"""
if width * self.height > height * self.width:
scaled_width = width
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index e73e431dc9..97548b54e5 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -101,6 +101,8 @@ class UploadResource(DirectServeJsonResource):
# the default 404, as that would just be confusing.
raise SynapseError(400, "Bad content")
- logger.info("Uploaded content with URI %r", content_uri)
+ logger.info("Uploaded content with URI '%s'", content_uri)
- respond_with_json(request, 200, {"content_uri": content_uri}, send_cors=True)
+ respond_with_json(
+ request, 200, {"content_uri": str(content_uri)}, send_cors=True
+ )
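`create_content` now returns a structured `MXCUri` and callers stringify it only at the JSON boundary, as in the response above. A sketch of a value object with the assumed observable behaviour (this is not `matrix_common`'s implementation, just an illustration of the shape):

```python
# Illustrative sketch only: a minimal value object mirroring the assumed
# behaviour of matrix_common's MXCUri as used in the diff above.
from dataclasses import dataclass


@dataclass(frozen=True)
class MXCUriSketch:
    server_name: str
    media_id: str

    def __str__(self) -> str:
        return f"mxc://{self.server_name}/{self.media_id}"


uri = MXCUriSketch("example.com", "abc123")
# Handlers can keep the structured object around and only stringify it at the
# JSON boundary, e.g. {"content_uri": str(uri)}.
assert str(uri) == "mxc://example.com/abc123"
```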
diff --git a/synapse/rest/models.py b/synapse/rest/models.py
new file mode 100644
index 0000000000..ac39cda8e5
--- /dev/null
+++ b/synapse/rest/models.py
@@ -0,0 +1,23 @@
+from pydantic import BaseModel, Extra
+
+
+class RequestBodyModel(BaseModel):
+ """A custom version of Pydantic's BaseModel which
+
+ - ignores unknown fields and
+ - does not allow fields to be overwritten after construction,
+
+ but otherwise uses Pydantic's default behaviour.
+
+ Ignoring unknown fields is a useful default. It means that clients can provide
+ unstable fields not known to the server without the request being refused outright.
+
+ Subclassing in this way is recommended by
+ https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
+ """
+
+ class Config:
+ # By default, ignore fields that we don't recognise.
+ extra = Extra.ignore
+ # By default, don't allow fields to be reassigned after parsing.
+ allow_mutation = False
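A minimal usage sketch for `RequestBodyModel` (assuming pydantic v1, which the `Extra`/`allow_mutation` options imply); the `RenameBody` subclass and its field are made up for illustration:

```python
# Usage sketch: unknown fields are ignored on parse, and assignment after
# construction is rejected, matching the Config above (pydantic v1).
from pydantic import BaseModel, Extra


class RequestBodyModel(BaseModel):
    class Config:
        extra = Extra.ignore
        allow_mutation = False


class RenameBody(RequestBodyModel):
    display_name: str


body = RenameBody.parse_obj({"display_name": "Alice", "org.example.unstable": True})
assert body.display_name == "Alice"  # unknown field silently ignored

try:
    body.display_name = "Bob"
except TypeError:
    pass  # immutable after construction, as configured
```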
diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py
index 1c1c7b3613..22784157e6 100644
--- a/synapse/rest/synapse/client/new_user_consent.py
+++ b/synapse/rest/synapse/client/new_user_consent.py
@@ -20,6 +20,7 @@ from synapse.api.errors import SynapseError
from synapse.handlers.sso import get_username_mapping_session_cookie_from_request
from synapse.http.server import DirectServeHtmlResource, respond_with_html
from synapse.http.servlet import parse_string
+from synapse.http.site import SynapseRequest
from synapse.types import UserID
from synapse.util.templates import build_jinja_env
@@ -88,7 +89,7 @@ class NewUserConsentResource(DirectServeHtmlResource):
html = template.render(template_params)
respond_with_html(request, 200, html)
- async def _async_render_POST(self, request: Request) -> None:
+ async def _async_render_POST(self, request: SynapseRequest) -> None:
try:
session_id = get_username_mapping_session_cookie_from_request(request)
except SynapseError as e:
diff --git a/synapse/rest/synapse/client/oidc/__init__.py b/synapse/rest/synapse/client/oidc/__init__.py
index 81fec39659..e4b28ce3df 100644
--- a/synapse/rest/synapse/client/oidc/__init__.py
+++ b/synapse/rest/synapse/client/oidc/__init__.py
@@ -17,6 +17,9 @@ from typing import TYPE_CHECKING
from twisted.web.resource import Resource
+from synapse.rest.synapse.client.oidc.backchannel_logout_resource import (
+ OIDCBackchannelLogoutResource,
+)
from synapse.rest.synapse.client.oidc.callback_resource import OIDCCallbackResource
if TYPE_CHECKING:
@@ -29,6 +32,7 @@ class OIDCResource(Resource):
def __init__(self, hs: "HomeServer"):
Resource.__init__(self)
self.putChild(b"callback", OIDCCallbackResource(hs))
+ self.putChild(b"backchannel_logout", OIDCBackchannelLogoutResource(hs))
__all__ = ["OIDCResource"]
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/rest/synapse/client/oidc/backchannel_logout_resource.py
index c52679cd60..e07e76855a 100644
--- a/synapse/replication/slave/storage/filtering.py
+++ b/synapse/rest/synapse/client/oidc/backchannel_logout_resource.py
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,24 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
from typing import TYPE_CHECKING
-from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.filtering import FilteringStore
+from synapse.http.server import DirectServeJsonResource
+from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
+logger = logging.getLogger(__name__)
-class SlavedFilteringStore(SQLBaseStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
- # Filters are immutable so this cache doesn't need to be expired
- get_user_filter = FilteringStore.__dict__["get_user_filter"]
+class OIDCBackchannelLogoutResource(DirectServeJsonResource):
+ isLeaf = 1
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self._oidc_handler = hs.get_oidc_handler()
+
+ async def _async_render_POST(self, request: SynapseRequest) -> None:
+ await self._oidc_handler.handle_backchannel_logout(request)
diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py
index 6ac9dbc7c9..b9402cfb75 100644
--- a/synapse/rest/synapse/client/password_reset.py
+++ b/synapse/rest/synapse/client/password_reset.py
@@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.api.errors import ThreepidValidationError
-from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http.server import DirectServeHtmlResource
from synapse.http.servlet import parse_string
from synapse.util.stringutils import assert_valid_client_secret
@@ -46,9 +45,6 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
- self._local_threepid_handling_disabled_due_to_email_config = (
- hs.config.email.local_threepid_handling_disabled_due_to_email_config
- )
self._confirmation_email_template = (
hs.config.email.email_password_reset_template_confirmation_html
)
@@ -59,8 +55,8 @@ class PasswordResetSubmitTokenResource(DirectServeHtmlResource):
hs.config.email.email_password_reset_template_failure_html
)
- # This resource should not be mounted if threepid behaviour is not LOCAL
- assert hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+ # This resource should only be mounted if email validation is enabled
+ assert hs.config.email.can_verify_email
async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]:
sid = parse_string(request, "sid", required=True)
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index 6f7ac54c65..e2174fdfea 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -18,6 +18,7 @@ from twisted.web.resource import Resource
from twisted.web.server import Request
from synapse.http.server import set_cors_headers
+from synapse.http.site import SynapseRequest
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.stringutils import parse_server_name
@@ -63,7 +64,7 @@ class ClientWellKnownResource(Resource):
Resource.__init__(self)
self._well_known_builder = WellKnownBuilder(hs)
- def render_GET(self, request: Request) -> bytes:
+ def render_GET(self, request: SynapseRequest) -> bytes:
set_cors_headers(request)
r = self._well_known_builder.get_well_known()
if not r:
diff --git a/synapse/server.py b/synapse/server.py
index 181984a1a4..5baae2325e 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -105,6 +105,7 @@ from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler
from synapse.handlers.user_directory import UserDirectoryHandler
from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
from synapse.notifier import Notifier
from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
@@ -220,8 +221,6 @@ class HomeServer(metaclass=abc.ABCMeta):
# instantiated during setup() for future return by get_datastores()
DATASTORE_CLASS = abc.abstractproperty()
- tls_server_context_factory: Optional[IOpenSSLContextFactory]
-
def __init__(
self,
hostname: str,
@@ -257,6 +256,9 @@ class HomeServer(metaclass=abc.ABCMeta):
self._module_web_resources: Dict[str, Resource] = {}
self._module_web_resources_consumed = False
+ # This attribute is set by the free function `refresh_certificate`.
+ self.tls_server_context_factory: Optional[IOpenSSLContextFactory] = None
+
def register_module_web_resource(self, path: str, resource: Resource) -> None:
"""Allows a module to register a web resource to be served at the given path.
@@ -314,7 +316,7 @@ class HomeServer(metaclass=abc.ABCMeta):
if self.config.worker.run_background_tasks:
self.setup_background_tasks()
- def start_listening(self) -> None:
+ def start_listening(self) -> None: # noqa: B027 (no-op by design)
"""Start the HTTP, manhole, metrics, etc listeners
Does nothing in this base class; overridden in derived classes to start the
@@ -340,7 +342,17 @@ class HomeServer(metaclass=abc.ABCMeta):
return domain_specific_string.domain == self.hostname
def is_mine_id(self, string: str) -> bool:
- return string.split(":", 1)[1] == self.hostname
+ """Determines whether a user ID or room alias originates from this homeserver.
+
+ Returns:
+ `True` if the hostname part of the user ID or room alias matches this
+ homeserver.
+ `False` otherwise, or if the user ID or room alias is malformed.
+ """
+ localpart_hostname = string.split(":", 1)
+ if len(localpart_hostname) < 2:
+ return False
+ return localpart_hostname[1] == self.hostname
@cache_in_self
def get_clock(self) -> Clock:
@@ -498,7 +510,7 @@ class HomeServer(metaclass=abc.ABCMeta):
)
@cache_in_self
- def get_device_handler(self):
+ def get_device_handler(self) -> DeviceWorkerHandler:
if self.config.worker.worker_app:
return DeviceWorkerHandler(self)
else:
@@ -756,7 +768,9 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_federation_ratelimiter(self) -> FederationRateLimiter:
return FederationRateLimiter(
- self.get_clock(), config=self.config.ratelimiting.rc_federation
+ self.get_clock(),
+ config=self.config.ratelimiting.rc_federation,
+ metrics_name="federation_servlets",
)
@cache_in_self
@@ -827,3 +841,8 @@ class HomeServer(metaclass=abc.ABCMeta):
self.config.ratelimiting.rc_message,
self.config.ratelimiting.rc_admin_redaction,
)
+
+ @cache_in_self
+ def get_common_usage_metrics_manager(self) -> CommonUsageMetricsManager:
+ """Usage metrics shared between phone home stats and the prometheus exporter."""
+ return CommonUsageMetricsManager(self)
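`is_mine_id` now tolerates identifiers without a `:` separator instead of raising `IndexError`. A standalone sketch of that behaviour (hypothetical function signature, taking the hostname explicitly):

```python
# Illustrative sketch of the malformed-identifier handling added above.
def is_mine_id(string: str, hostname: str) -> bool:
    localpart_hostname = string.split(":", 1)
    if len(localpart_hostname) < 2:
        return False
    return localpart_hostname[1] == hostname


assert is_mine_id("@alice:example.com", "example.com")
assert not is_mine_id("@bob:other.org", "example.com")
assert not is_mine_id("not-a-user-id", "example.com")  # previously raised IndexError
```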
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py
index 698ca742ed..94025ba41f 100644
--- a/synapse/server_notices/consent_server_notices.py
+++ b/synapse/server_notices/consent_server_notices.py
@@ -113,9 +113,8 @@ def copy_with_str_subst(x: Any, substitutions: Any) -> Any:
"""Deep-copy a structure, carrying out string substitutions on any strings
Args:
- x (object): structure to be copied
- substitutions (object): substitutions to be made - passed into the
- string '%' operator
+ x: structure to be copied
+ substitutions: substitutions to be made - passed into the string '%' operator
Returns:
copy of x
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 3134cd2d3d..a31a2c99a7 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -170,11 +170,13 @@ class ResourceLimitsServerNotices:
room_id: The room id of the server notices room
Returns:
- bool: Is the room currently blocked
- list: The list of pinned event IDs that are unrelated to limit blocking
- This list can be used as a convenience in the case where the block
- is to be lifted and the remaining pinned event references need to be
- preserved
+ Tuple of:
+ Is the room currently blocked
+
+ The list of pinned event IDs that are unrelated to limit blocking
+ This list can be used as a convenience in the case where the block
+ is to be lifted and the remaining pinned event references need to be
+ preserved
"""
currently_blocked = False
pinned_state_event = None
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 8ecab86ec7..564e3705c2 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -102,6 +102,10 @@ class ServerNoticesManager:
Returns:
The room's ID, or None if no room could be found.
"""
+ # If there is no server notices MXID, then there is no server notices room
+ if self.server_notices_mxid is None:
+ return None
+
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id, [Membership.INVITE, Membership.JOIN]
)
@@ -111,8 +115,10 @@ class ServerNoticesManager:
# be joined. This is kinda deliberate, in that if somebody somehow
# manages to invite the system user to a room, that doesn't make it
# the server notices room.
- user_ids = await self._store.get_users_in_room(room.room_id)
- if len(user_ids) <= 2 and self.server_notices_mxid in user_ids:
+ is_server_notices_room = await self._store.check_local_user_in_room(
+ user_id=self.server_notices_mxid, room_id=room.room_id
+ )
+ if is_server_notices_room:
# we found a room which our user shares with the system notice
# user
return room.room_id
@@ -244,7 +250,7 @@ class ServerNoticesManager:
assert self.server_notices_mxid is not None
notice_user_data_in_room = await self._message_handler.get_room_data(
- self.server_notices_mxid,
+ create_requester(self.server_notices_mxid),
room_id,
EventTypes.Member,
self.server_notices_mxid,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index c355e4f98a..833ffec3de 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -44,7 +44,6 @@ from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.async_helpers import Linearizer
@@ -191,6 +190,7 @@ class StateHandler:
room_id: str,
event_ids: Collection[str],
state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
) -> StateMap[str]:
"""Fetch the state after each of the given event IDs. Resolve them and return.
@@ -201,20 +201,25 @@ class StateHandler:
Args:
room_id: the room_id containing the given events.
event_ids: the events whose state should be fetched and resolved.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the given `event_id`s, regardless of whether `state_filter` is
+ satisfied by partial state.
Returns:
the state dict (a mapping from (event_type, state_key) -> event_id) which
holds the resolution of the states after the given event IDs.
"""
logger.debug("calling resolve_state_groups from compute_state_after_events")
- ret = await self.resolve_state_groups_for_events(room_id, event_ids)
+ ret = await self.resolve_state_groups_for_events(
+ room_id, event_ids, await_full_state
+ )
return await ret.get_state(self._state_storage_controller, state_filter)
- async def get_current_users_in_room(
+ async def get_current_user_ids_in_room(
self, room_id: str, latest_event_ids: List[str]
- ) -> Dict[str, ProfileInfo]:
+ ) -> Set[str]:
"""
- Get the users who are currently in a room.
+ Get the user IDs who are currently in a room.
Note: This is much slower than using the equivalent method
`DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`,
@@ -225,15 +230,15 @@ class StateHandler:
room_id: The ID of the room.
latest_event_ids: Precomputed list of latest event IDs. Will be computed if None.
Returns:
- Dictionary of user IDs to their profileinfo.
+ Set of user IDs in the room.
"""
assert latest_event_ids is not None
- logger.debug("calling resolve_state_groups from get_current_users_in_room")
+ logger.debug("calling resolve_state_groups from get_current_user_ids_in_room")
entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
state = await entry.get_state(self._state_storage_controller, StateFilter.all())
- return await self.store.get_joined_users_from_state(room_id, state, entry)
+ return await self.store.get_joined_user_ids_from_state(room_id, state)
async def get_hosts_in_room_at_events(
self, room_id: str, event_ids: Collection[str]
@@ -421,6 +426,69 @@ class StateHandler:
partial_state=partial_state,
)
+ async def compute_event_context_for_batched(
+ self,
+ event: EventBase,
+ state_ids_before_event: StateMap[str],
+ current_state_group: int,
+ ) -> EventContext:
+ """
+ Generate an event context for an event that has not yet been persisted to the
+ database. Intended for use with events that are created to be persisted in a batch.
+ Args:
+ event: the event the context is being computed for
+ state_ids_before_event: a state map consisting of the state ids of the events
+ created prior to this event.
+ current_state_group: the current state group before the event.
+ """
+ state_group_before_event_prev_group = None
+ deltas_to_state_group_before_event = None
+
+ state_group_before_event = current_state_group
+
+ # if the event is not state, we are set
+ if not event.is_state():
+ return EventContext.with_state(
+ storage=self._storage_controllers,
+ state_group_before_event=state_group_before_event,
+ state_group=state_group_before_event,
+ state_delta_due_to_event={},
+ prev_group=state_group_before_event_prev_group,
+ delta_ids=deltas_to_state_group_before_event,
+ partial_state=False,
+ )
+
+ # otherwise, we'll need to create a new state group for after the event
+ key = (event.type, event.state_key)
+
+ if state_ids_before_event is not None:
+ replaces = state_ids_before_event.get(key)
+
+ if replaces and replaces != event.event_id:
+ event.unsigned["replaces_state"] = replaces
+
+ delta_ids = {key: event.event_id}
+
+ state_group_after_event = (
+ await self._state_storage_controller.store_state_group(
+ event.event_id,
+ event.room_id,
+ prev_group=state_group_before_event,
+ delta_ids=delta_ids,
+ current_state_ids=None,
+ )
+ )
+
+ return EventContext.with_state(
+ storage=self._storage_controllers,
+ state_group=state_group_after_event,
+ state_group_before_event=state_group_before_event,
+ state_delta_due_to_event=delta_ids,
+ prev_group=state_group_before_event,
+ delta_ids=delta_ids,
+ partial_state=False,
+ )
+
@measure_func()
async def resolve_state_groups_for_events(
self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 7db032203b..1b9d7d8457 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -271,40 +271,41 @@ async def _get_power_level_for_sender(
async def _get_auth_chain_difference(
room_id: str,
state_sets: Sequence[Mapping[Any, str]],
- event_map: Dict[str, EventBase],
+ unpersisted_events: Dict[str, EventBase],
state_res_store: StateResolutionStore,
) -> Set[str]:
"""Compare the auth chains of each state set and return the set of events
- that only appear in some but not all of the auth chains.
+ that only appear in some, but not all, of the auth chains.
Args:
- state_sets
- event_map
- state_res_store
+ state_sets: The input state sets we are trying to resolve across.
+ unpersisted_events: A map from event ID to EventBase containing all unpersisted
+ events involved in this resolution.
+ state_res_store:
Returns:
- Set of event IDs
+ The auth difference of the given state sets, as a set of event IDs.
"""
# The `StateResolutionStore.get_auth_chain_difference` function assumes that
# all events passed to it (and their auth chains) have been persisted
- # previously. This is not the case for any events in the `event_map`, and so
- # we need to manually handle those events.
+ # previously. We need to manually handle any other events that are yet to be
+ # persisted.
#
- # We do this by:
- # 1. calculating the auth chain difference for the state sets based on the
- # events in `event_map` alone
- # 2. replacing any events in the state_sets that are also in `event_map`
- # with their auth events (recursively), and then calling
- # `store.get_auth_chain_difference` as normal
- # 3. adding the results of 1 and 2 together.
-
- # Map from event ID in `event_map` to their auth event IDs, and their auth
- # event IDs if they appear in the `event_map`. This is the intersection of
- # the event's auth chain with the events in the `event_map` *plus* their
+ # We do this in three steps:
+ # 1. Compute the set of unpersisted events belonging to the auth difference.
+ # 2. Replace any unpersisted events in the state_sets with their auth events,
+ # recursively, until the state_sets contain only persisted events.
+ # Then we call `store.get_auth_chain_difference` as normal, which computes
+ # the set of persisted events belonging to the auth difference.
+ # 3. Add the results of 1 and 2 together.
+
+ # Map from event ID in `unpersisted_events` to their auth event IDs, and their auth
+ # event IDs if they appear in the `unpersisted_events`. This is the intersection of
+ # the event's auth chain with the events in `unpersisted_events` *plus* their
# auth event IDs.
events_to_auth_chain: Dict[str, Set[str]] = {}
- for event in event_map.values():
+ for event in unpersisted_events.values():
chain = {event.event_id}
events_to_auth_chain[event.event_id] = chain
@@ -312,16 +313,16 @@ async def _get_auth_chain_difference(
while to_search:
for auth_id in to_search.pop().auth_event_ids():
chain.add(auth_id)
- auth_event = event_map.get(auth_id)
+ auth_event = unpersisted_events.get(auth_id)
if auth_event:
to_search.append(auth_event)
- # We now a) calculate the auth chain difference for the unpersisted events
- # and b) work out the state sets to pass to the store.
+ # We now 1) calculate the auth chain difference for the unpersisted events
+ # and 2) work out the state sets to pass to the store.
#
- # Note: If the `event_map` is empty (which is the common case), we can do a
+ # Note: If there are no `unpersisted_events` (which is the common case), we can do a
# much simpler calculation.
- if event_map:
+ if unpersisted_events:
# The list of state sets to pass to the store, where each state set is a set
# of the event ids making up the state. This is similar to `state_sets`,
# except that (a) we only have event ids, not the complete
@@ -344,14 +345,18 @@ async def _get_auth_chain_difference(
for event_id in state_set.values():
event_chain = events_to_auth_chain.get(event_id)
if event_chain is not None:
- # We have an event in `event_map`. We add all the auth
- # events that it references (that aren't also in `event_map`).
- set_ids.update(e for e in event_chain if e not in event_map)
+ # We have an unpersisted event. We add all the auth
+ # events that it references which are also unpersisted.
+ set_ids.update(
+ e for e in event_chain if e not in unpersisted_events
+ )
# We also add the full chain of unpersisted event IDs
# referenced by this state set, so that we can work out the
# auth chain difference of the unpersisted events.
- unpersisted_ids.update(e for e in event_chain if e in event_map)
+ unpersisted_ids.update(
+ e for e in event_chain if e in unpersisted_events
+ )
else:
set_ids.add(event_id)
@@ -361,15 +366,15 @@ async def _get_auth_chain_difference(
union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])
- difference_from_event_map: Collection[str] = union - intersection
+ auth_difference_unpersisted_part: Collection[str] = union - intersection
else:
- difference_from_event_map = ()
+ auth_difference_unpersisted_part = ()
state_sets_ids = [set(state_set.values()) for state_set in state_sets]
difference = await state_res_store.get_auth_chain_difference(
room_id, state_sets_ids
)
- difference.update(difference_from_event_map)
+ difference.update(auth_difference_unpersisted_part)
return difference
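The unpersisted part of the auth difference is simply the union of the per-state-set unpersisted auth chains minus their intersection. A tiny stdlib sketch with made-up event IDs:

```python
# Illustrative sketch of the "union minus intersection" step used above.
unpersisted_set_ids = [
    {"$a", "$b", "$c"},
    {"$a", "$c"},
    {"$a", "$b", "$d"},
]

union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])

# Events that appear in some, but not all, of the sets.
auth_difference_unpersisted_part = union - intersection
assert auth_difference_unpersisted_part == {"$b", "$c", "$d"}
```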
@@ -434,7 +439,7 @@ async def _add_event_and_auth_chain_to_graph(
event_id: str,
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
- auth_diff: Set[str],
+ full_conflicted_set: Set[str],
) -> None:
"""Helper function for _reverse_topological_power_sort that add the event
and its auth chain (that is in the auth diff) to the graph
@@ -445,7 +450,7 @@ async def _add_event_and_auth_chain_to_graph(
event_id: Event to add to the graph
event_map
state_res_store
- auth_diff: Set of event IDs that are in the auth difference.
+ full_conflicted_set: Set of event IDs that are in the full conflicted set.
"""
state = [event_id]
@@ -455,7 +460,7 @@ async def _add_event_and_auth_chain_to_graph(
event = await _get_event(room_id, eid, event_map, state_res_store)
for aid in event.auth_event_ids():
- if aid in auth_diff:
+ if aid in full_conflicted_set:
if aid not in graph:
state.append(aid)
@@ -468,7 +473,7 @@ async def _reverse_topological_power_sort(
event_ids: Iterable[str],
event_map: Dict[str, EventBase],
state_res_store: StateResolutionStore,
- auth_diff: Set[str],
+ full_conflicted_set: Set[str],
) -> List[str]:
"""Returns a list of the event_ids sorted by reverse topological ordering,
and then by power level and origin_server_ts
@@ -479,7 +484,7 @@ async def _reverse_topological_power_sort(
event_ids: The events to sort
event_map
state_res_store
- auth_diff: Set of event IDs that are in the auth difference.
+ full_conflicted_set: Set of event IDs that are in the full conflicted set.
Returns:
The sorted list
@@ -488,7 +493,7 @@ async def _reverse_topological_power_sort(
graph: Dict[str, Set[str]] = {}
for idx, event_id in enumerate(event_ids, start=1):
await _add_event_and_auth_chain_to_graph(
- graph, room_id, event_id, event_map, state_res_store, auth_diff
+ graph, room_id, event_id, event_map, state_res_store, full_conflicted_set
)
# We await occasionally when we're working with large data sets to
@@ -572,6 +577,21 @@ async def _iterative_auth_checks(
if ev.rejected_reason is None:
auth_events[key] = event_map[ev_id]
+ if event.rejected_reason is not None:
+ # Do not admit previously rejected events into state.
+ # TODO: This isn't spec compliant. Events that were previously rejected due
+ # to failing auth checks at their state, but pass auth checks during
+ # state resolution should be accepted. Synapse does not handle the
+ # change of rejection status well, so we preserve the previous
+ # rejection status for now.
+ #
+ # Note that events rejected for non-state reasons, such as having the
+ # wrong auth events, should remain rejected.
+ #
+ # https://spec.matrix.org/v1.2/rooms/v9/#rejected-events
+ # https://github.com/matrix-org/synapse/issues/13797
+ continue
+
try:
event_auth.check_state_dependent_auth_rules(
event,
diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html
index 9e6daf38ac..40510889ac 100644
--- a/synapse/static/client/login/index.html
+++ b/synapse/static/client/login/index.html
@@ -3,7 +3,8 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title> Login </title>
- <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+ <meta http-equiv="X-UA-Compatible" content="IE=edge">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css">
<script src="js/jquery-3.4.1.min.js"></script>
<script src="js/login.js"></script>
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
index 140653574d..27bbd76f51 100644
--- a/synapse/static/client/register/index.html
+++ b/synapse/static/client/register/index.html
@@ -2,7 +2,8 @@
<html>
<head>
<title> Registration </title>
-<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="style.css">
<script src="js/jquery-3.4.1.min.js"></script>
<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index e30f9c76d4..69abf6fa87 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -15,12 +15,13 @@
# limitations under the License.
import logging
from abc import ABCMeta
-from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union
from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.types import get_domain_from_id
from synapse.util import json_decoder
+from synapse.util.caches.descriptors import CachedFunction
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -47,7 +48,9 @@ class SQLBaseStore(metaclass=ABCMeta):
self.database_engine = database.engine
self.db_pool = database
- def process_replication_rows(
+ self.external_cached_functions: Dict[str, CachedFunction] = {}
+
+ def process_replication_rows( # noqa: B027 (no-op by design)
self,
stream_name: str,
instance_name: str,
@@ -88,6 +91,10 @@ class SQLBaseStore(metaclass=ABCMeta):
self._attempt_to_invalidate_cache(
"get_user_in_room_with_profile", (room_id, user_id)
)
+ self._attempt_to_invalidate_cache(
+ "get_rooms_for_user_with_stream_ordering", (user_id,)
+ )
+ self._attempt_to_invalidate_cache("get_rooms_for_user", (user_id,))
# Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
@@ -95,7 +102,7 @@ class SQLBaseStore(metaclass=ABCMeta):
def _attempt_to_invalidate_cache(
self, cache_name: str, key: Optional[Collection[Any]]
- ) -> None:
+ ) -> bool:
"""Attempts to invalidate the cache of the given name, ignoring if the
cache doesn't exist. Mainly used for invalidating caches on workers,
where they may not have the cache.
@@ -113,9 +120,12 @@ class SQLBaseStore(metaclass=ABCMeta):
try:
cache = getattr(self, cache_name)
except AttributeError:
- # We probably haven't pulled in the cache in this worker,
- # which is fine.
- return
+ # Check if an externally defined module cache has been registered
+ cache = self.external_cached_functions.get(cache_name)
+ if not cache:
+ # We probably haven't pulled in the cache in this worker,
+ # which is fine.
+ return False
if key is None:
cache.invalidate_all()
@@ -125,6 +135,13 @@ class SQLBaseStore(metaclass=ABCMeta):
invalidate_method = getattr(cache, "invalidate_local", cache.invalidate)
invalidate_method(tuple(key))
+ return True
+
+ def register_external_cached_function(
+ self, cache_name: str, func: CachedFunction
+ ) -> None:
+ self.external_cached_functions[cache_name] = func
+
def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any:
"""
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 555b4e77d2..2056ecb2c3 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -285,7 +285,10 @@ class BackgroundUpdater:
back_to_back_failures = 0
try:
- logger.info("Starting background schema updates")
+ logger.info(
+ "Starting background schema updates for database %s",
+ self._database_name,
+ )
while self.enabled:
try:
result = await self.do_next_background_update(sleep)
@@ -533,6 +536,7 @@ class BackgroundUpdater:
index_name: name of index to add
table: table to add index to
columns: columns/expressions to include in index
+ where_clause: A WHERE clause to specify a partial unique index.
unique: true to make a UNIQUE index
psql_only: true to only create this index on psql databases (useful
for virtual sqlite tables)
@@ -581,9 +585,6 @@ class BackgroundUpdater:
def create_index_sqlite(conn: Connection) -> None:
# Sqlite doesn't support concurrent creation of indexes.
#
- # We don't use partial indices on SQLite as it wasn't introduced
- # until 3.8, and wheezy and CentOS 7 have 3.7
- #
# We assume that sqlite doesn't give us invalid indices; however
# we may still end up with the index existing but the
# background_updates not having been recorded if synapse got shut
@@ -591,12 +592,13 @@ class BackgroundUpdater:
# has supported CREATE TABLE|INDEX IF NOT EXISTS since 3.3.0.)
sql = (
"CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
- " (%(columns)s)"
+ " (%(columns)s) %(where_clause)s"
) % {
"unique": "UNIQUE" if unique else "",
"name": index_name,
"table": table,
"columns": ", ".join(columns),
+ "where_clause": "WHERE " + where_clause if where_clause else "",
}
c = conn.cursor()
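With the SQLite 3.7 workaround gone, the optional `where_clause` is interpolated into the `CREATE INDEX IF NOT EXISTS` statement to build a partial index. A runnable sketch of the same string construction against an in-memory SQLite database (table and column names are made up):

```python
# Illustrative sketch of the SQL construction above: appending an optional
# WHERE clause yields a partial (filtered) index on SQLite.
import sqlite3


def build_create_index_sql(
    index_name: str, table: str, columns: list, unique: bool, where_clause: str = ""
) -> str:
    return (
        "CREATE %(unique)s INDEX IF NOT EXISTS %(name)s ON %(table)s"
        " (%(columns)s) %(where_clause)s"
    ) % {
        "unique": "UNIQUE" if unique else "",
        "name": index_name,
        "table": table,
        "columns": ", ".join(columns),
        "where_clause": "WHERE " + where_clause if where_clause else "",
    }


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (event_id TEXT, rejected BOOLEAN)")
sql = build_create_index_sql(
    "events_rejected_idx", "events", ["event_id"], unique=True, where_clause="rejected"
)
conn.execute(sql)
print(sql)
```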
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index cf98b0ab48..33ffef521b 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -45,8 +45,14 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.logging import opentracing
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
+from synapse.logging.opentracing import (
+ SynapseTags,
+ active_span,
+ set_tag,
+ start_active_span_follows_from,
+ trace,
+)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.controllers.state import StateStorageController
from synapse.storage.databases import Databases
@@ -198,9 +204,8 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
process to to so, calling the per_item_callback for each item.
Args:
- room_id (str):
- task (_EventPersistQueueTask): A _PersistEventsTask or
- _UpdateCurrentStateTask to process.
+ room_id:
+ task: A _PersistEventsTask or _UpdateCurrentStateTask to process.
Returns:
the result returned by the `_per_item_callback` passed to
@@ -223,7 +228,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
queue.append(end_item)
# also add our active opentracing span to the item so that we get a link back
- span = opentracing.active_span()
+ span = active_span()
if span:
end_item.parent_opentracing_span_contexts.append(span.context)
@@ -234,7 +239,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
res = await make_deferred_yieldable(end_item.deferred.observe())
# add another opentracing span which links to the persist trace.
- with opentracing.start_active_span_follows_from(
+ with start_active_span_follows_from(
f"{task.name}_complete", (end_item.opentracing_span_context,)
):
pass
@@ -266,7 +271,7 @@ class _EventPeristenceQueue(Generic[_PersistResult]):
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
- with opentracing.start_active_span_follows_from(
+ with start_active_span_follows_from(
item.task.name,
item.parent_opentracing_span_contexts,
inherit_force_tracing=True,
@@ -355,7 +360,7 @@ class EventsPersistenceStorageController:
f"Found an unexpected task type in event persistence queue: {task}"
)
- @opentracing.trace
+ @trace
async def persist_events(
self,
events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
@@ -380,9 +385,21 @@ class EventsPersistenceStorageController:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
"""
+ event_ids: List[str] = []
partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
+ event_ids.append(event.event_id)
+
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids",
+ str(event_ids),
+ )
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
+ str(len(event_ids)),
+ )
+ set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
async def enqueue(
item: Tuple[str, List[Tuple[EventBase, EventContext]]]
@@ -405,20 +422,22 @@ class EventsPersistenceStorageController:
for d in ret_vals:
replaced_events.update(d)
- events = []
+ persisted_events = []
for event, _ in events_and_contexts:
existing_event_id = replaced_events.get(event.event_id)
if existing_event_id:
- events.append(await self.main_store.get_event(existing_event_id))
+ persisted_events.append(
+ await self.main_store.get_event(existing_event_id)
+ )
else:
- events.append(event)
+ persisted_events.append(event)
return (
- events,
+ persisted_events,
self.main_store.get_room_max_token(),
)
- @opentracing.trace
+ @trace
async def persist_event(
self, event: EventBase, context: EventContext, backfilled: bool = False
) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]:
@@ -580,11 +599,6 @@ class EventsPersistenceStorageController:
# room
state_delta_for_room: Dict[str, DeltaState] = {}
- # Set of remote users which were in rooms the server has left. We
- # should check if we still share any rooms and if not we mark their
- # device lists as stale.
- potentially_left_users: Set[str] = set()
-
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
@@ -698,13 +712,9 @@ class EventsPersistenceStorageController:
room_id,
ev_ctx_rm,
delta,
- current_state,
- potentially_left_users,
)
if not is_still_joined:
logger.info("Server no longer in room %s", room_id)
- latest_event_ids = set()
- current_state = {}
delta.no_longer_in_room = True
state_delta_for_room[room_id] = delta
@@ -717,8 +727,6 @@ class EventsPersistenceStorageController:
inhibit_local_membership_updates=backfilled,
)
- await self._handle_potentially_left_users(potentially_left_users)
-
return replaced_events
async def _calculate_new_extremities(
@@ -1094,8 +1102,6 @@ class EventsPersistenceStorageController:
room_id: str,
ev_ctx_rm: List[Tuple[EventBase, EventContext]],
delta: DeltaState,
- current_state: Optional[StateMap[str]],
- potentially_left_users: Set[str],
) -> bool:
"""Check if the server will still be joined after the given events have
been persised.
@@ -1105,11 +1111,6 @@ class EventsPersistenceStorageController:
ev_ctx_rm
delta: The delta of current state between what is in the database
and what the new current state will be.
- current_state: The new current state if it already been calculated,
- otherwise None.
- potentially_left_users: If the server has left the room, then joined
- remote users will be added to this set to indicate that the
- server may no longer be sharing a room with them.
"""
if not any(
@@ -1163,45 +1164,4 @@ class EventsPersistenceStorageController:
):
return True
- # The server will leave the room, so we go and find out which remote
- # users will still be joined when we leave.
- if current_state is None:
- current_state = await self.main_store.get_partial_current_state_ids(room_id)
- current_state = dict(current_state)
- for key in delta.to_delete:
- current_state.pop(key, None)
-
- current_state.update(delta.to_insert)
-
- remote_event_ids = [
- event_id
- for (
- typ,
- state_key,
- ), event_id in current_state.items()
- if typ == EventTypes.Member and not self.is_mine_id(state_key)
- ]
- members = await self.main_store.get_membership_from_event_ids(remote_event_ids)
- potentially_left_users.update(
- member.user_id
- for member in members.values()
- if member and member.membership == Membership.JOIN
- )
-
return False
-
- async def _handle_potentially_left_users(self, user_ids: Set[str]) -> None:
- """Given a set of remote users check if the server still shares a room with
- them. If not then mark those users' device cache as stale.
- """
-
- if not user_ids:
- return
-
- joined_users = await self.main_store.get_users_server_still_shares_room_with(
- user_ids
- )
- left_users = user_ids - joined_users
-
- for user_id in left_users:
- await self.main_store.mark_remote_user_device_list_as_unsubscribed(user_id)
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 1e35046e07..2b31ce54bb 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -29,12 +29,15 @@ from typing import (
from synapse.api.constants import EventTypes
from synapse.events import EventBase
+from synapse.logging.opentracing import tag_args, trace
+from synapse.storage.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.storage.util.partial_state_events_tracker import (
PartialCurrentStateTracker,
PartialStateEventsTracker,
)
from synapse.types import MutableStateMap, StateMap
+from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -179,6 +182,7 @@ class StateStorageController:
return self.stores.state._get_state_groups_from_groups(groups, state_filter)
+ @trace
async def get_state_for_events(
self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None
) -> Dict[str, StateMap[EventBase]]:
@@ -225,10 +229,14 @@ class StateStorageController:
return {event: event_to_state[event] for event in event_ids}
+ @trace
+ @tag_args
+ @cancellable
async def get_state_ids_for_events(
self,
event_ids: Collection[str],
state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
) -> Dict[str, StateMap[str]]:
"""
Get the state dicts corresponding to a list of events, containing the event_ids
@@ -237,6 +245,9 @@ class StateStorageController:
Args:
event_ids: events whose state should be returned
state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at these events and `state_filter` is not satisfied by partial state.
+ Defaults to `True`.
Returns:
A dict from event_id -> (type, state_key) -> event_id
@@ -245,8 +256,12 @@ class StateStorageController:
RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown)
"""
- await_full_state = True
- if state_filter and not state_filter.must_await_full_state(self._is_mine_id):
+ if (
+ await_full_state
+ and state_filter
+ and not state_filter.must_await_full_state(self._is_mine_id)
+ ):
+ # Full state is not required if the state filter is restrictive enough.
await_full_state = False
event_to_groups = await self.get_state_group_for_events(
@@ -287,8 +302,12 @@ class StateStorageController:
)
return state_map[event_id]
+ @trace
async def get_state_ids_for_event(
- self, event_id: str, state_filter: Optional[StateFilter] = None
+ self,
+ event_id: str,
+ state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
) -> StateMap[str]:
"""
Get the state dict corresponding to a particular event
@@ -296,6 +315,9 @@ class StateStorageController:
Args:
event_id: event whose state should be returned
state_filter: The state filter used to fetch state from the database.
+ await_full_state: if `True`, will block if we do not yet have complete state
+ at the event and `state_filter` is not satisfied by partial state.
+ Defaults to `True`.
Returns:
A dict from (type, state_key) -> state_event_id
@@ -305,7 +327,9 @@ class StateStorageController:
outlier or is unknown)
"""
state_map = await self.get_state_ids_for_events(
- [event_id], state_filter or StateFilter.all()
+ [event_id],
+ state_filter or StateFilter.all(),
+ await_full_state=await_full_state,
)
return state_map[event_id]
@@ -327,6 +351,9 @@ class StateStorageController:
groups, state_filter or StateFilter.all()
)
+ @trace
+ @tag_args
+ @cancellable
async def get_state_group_for_events(
self,
event_ids: Collection[str],
@@ -375,10 +402,12 @@ class StateStorageController:
event_id, room_id, prev_group, delta_ids, current_state_ids
)
+ @cancellable
async def get_current_state_ids(
self,
room_id: str,
state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
on_invalidate: Optional[Callable[[], None]] = None,
) -> StateMap[str]:
"""Get the current state event ids for a room based on the
@@ -391,13 +420,17 @@ class StateStorageController:
room_id: The room to get the state IDs of. state_filter: The state
filter used to fetch state from the
database.
+ await_full_state: if true, will block if we do not yet have complete
+ state for the room.
on_invalidate: Callback for when the `get_current_state_ids` cache
for the room gets invalidated.
Returns:
The current state of the room.
"""
- if not state_filter or state_filter.must_await_full_state(self._is_mine_id):
+ if await_full_state and (
+ not state_filter or state_filter.must_await_full_state(self._is_mine_id)
+ ):
await self._partial_state_room_tracker.await_full_state(room_id)
if state_filter and not state_filter.is_full():
@@ -468,6 +501,7 @@ class StateStorageController:
prev_stream_id, max_stream_id
)
+ @trace
async def get_current_state(
self, room_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[EventBase]:
@@ -496,8 +530,67 @@ class StateStorageController:
return state_map.get(key)
async def get_current_hosts_in_room(self, room_id: str) -> Set[str]:
- """Get current hosts in room based on current state."""
+ """Get current hosts in room based on current state.
+
+ Blocks until we have full state for the given room. This only happens for rooms
+ with partial state.
+ """
await self._partial_state_room_tracker.await_full_state(room_id)
return await self.stores.main.get_current_hosts_in_room(room_id)
+
+ async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]:
+ """Get current hosts in room based on current state.
+
+ Blocks until we have full state for the given room. This only happens for rooms
+ with partial state.
+
+ Returns:
+            A list of hosts in the room, sorted by longest in the room first (i.e.
+            sorted by the join with the lowest depth first).
+ """
+
+ await self._partial_state_room_tracker.await_full_state(room_id)
+
+ return await self.stores.main.get_current_hosts_in_room_ordered(room_id)
+
+ async def get_current_hosts_in_room_or_partial_state_approximation(
+ self, room_id: str
+ ) -> Collection[str]:
+ """Get approximation of current hosts in room based on current state.
+
+ For rooms with full state, this is equivalent to `get_current_hosts_in_room`,
+ with the same order of results.
+
+ For rooms with partial state, no blocking occurs. Instead, the list of hosts
+ in the room at the time of joining is combined with the list of hosts which
+ joined the room afterwards. The returned list may include hosts that are not
+ actually in the room and exclude hosts that are in the room, since we may
+ calculate state incorrectly during the partial state phase. The order of results
+ is arbitrary for rooms with partial state.
+ """
+ # We have to read this list first to mitigate races with un-partial stating.
+ # This will be empty for rooms with full state.
+ hosts_at_join = await self.stores.main.get_partial_state_servers_at_join(
+ room_id
+ )
+
+ hosts_from_state = await self.stores.main.get_current_hosts_in_room(room_id)
+
+ hosts = set(hosts_at_join)
+ hosts.update(hosts_from_state)
+
+ return hosts
+
+ async def get_users_in_room_with_profiles(
+ self, room_id: str
+ ) -> Dict[str, ProfileInfo]:
+ """
+ Get the current users in the room with their profiles.
+ If the room is currently partial-stated, this will block until the room has
+ full state.
+ """
+ await self._partial_state_room_tracker.await_full_state(room_id)
+
+ return await self.stores.main.get_users_in_room_with_profiles(room_id)
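
# A minimal usage sketch (not part of the diff above) showing how a caller might
# combine the new `await_full_state` flag with a restrictive StateFilter to avoid
# blocking on rooms that only have partial state. `controller` is assumed to be a
# StateStorageController instance and `event_id` an event in such a room.

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter
from synapse.types import StateMap


async def membership_state_ids_without_blocking(controller, event_id: str) -> StateMap[str]:
    member_filter = StateFilter.from_types([(EventTypes.Member, None)])
    return await controller.get_state_ids_for_event(
        event_id,
        state_filter=member_filter,
        await_full_state=False,  # do not wait for full state if partial state suffices
    )
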
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index b394a6658b..a14b13aec8 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -94,7 +94,9 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
"event_search": "event_search_event_id_idx",
"local_media_repository_thumbnails": "local_media_repository_thumbnails_method_idx",
"remote_media_cache_thumbnails": "remote_media_repository_thumbnails_method_idx",
- "event_push_summary": "event_push_summary_unique_index",
+ "event_push_summary": "event_push_summary_unique_index2",
+ "receipts_linearized": "receipts_linearized_unique_index",
+ "receipts_graph": "receipts_graph_unique_index",
}
@@ -288,8 +290,7 @@ class LoggingTransaction:
# LoggingTransaction isn't expecting there to be any callbacks; assert that
# is not the case.
assert self.after_callbacks is not None
- # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
- self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type]
+ self.after_callbacks.append((callback, args, kwargs))
def async_call_after(
self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
@@ -310,8 +311,7 @@ class LoggingTransaction:
# LoggingTransaction isn't expecting there to be any callbacks; assert that
# is not the case.
assert self.async_after_callbacks is not None
- # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
- self.async_after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type]
+ self.async_after_callbacks.append((callback, args, kwargs))
def call_on_exception(
self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs
@@ -329,8 +329,7 @@ class LoggingTransaction:
# LoggingTransaction isn't expecting there to be any callbacks; assert that
# is not the case.
assert self.exception_callbacks is not None
- # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668
- self.exception_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type]
+ self.exception_callbacks.append((callback, args, kwargs))
def fetchone(self) -> Optional[Tuple]:
return self.txn.fetchone()
@@ -391,6 +390,14 @@ class LoggingTransaction:
def executemany(self, sql: str, *args: Any) -> None:
self._do_execute(self.txn.executemany, sql, *args)
+ def executescript(self, sql: str) -> None:
+ if isinstance(self.database_engine, Sqlite3Engine):
+ self._do_execute(self.txn.executescript, sql) # type: ignore[attr-defined]
+ else:
+ raise NotImplementedError(
+ f"executescript only exists for sqlite driver, not {type(self.database_engine)}"
+ )
+
def _make_sql_one_line(self, sql: str) -> str:
"Strip newlines out of SQL so that the loggers in the DB are on one line"
return " ".join(line.strip() for line in sql.splitlines() if line.strip())
@@ -411,10 +418,7 @@ class LoggingTransaction:
sql = self.database_engine.convert_param_style(sql)
if args:
try:
- # The type-ignore should be redundant once mypy releases a version with
- # https://github.com/python/mypy/pull/12668. (`args` might be empty,
- # (but we'll catch the index error if so.)
- sql_logger.debug("[SQL values] {%s} %r", self.name, args[0]) # type: ignore[index]
+ sql_logger.debug("[SQL values] {%s} %r", self.name, args[0])
except Exception:
# Don't let logging failures stop SQL from working
pass
@@ -533,15 +537,14 @@ class DatabasePool:
if isinstance(self.engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search")
- if self.engine.can_native_upsert:
- # Check ASAP (and then later, every 1s) to see if we have finished
- # background updates of tables that aren't safe to update.
- self._clock.call_later(
- 0.0,
- run_as_background_process,
- "upsert_safety_check",
- self._check_safe_to_upsert,
- )
+ # Check ASAP (and then later, every 1s) to see if we have finished
+ # background updates of tables that aren't safe to update.
+ self._clock.call_later(
+ 0.0,
+ run_as_background_process,
+ "upsert_safety_check",
+ self._check_safe_to_upsert,
+ )
def name(self) -> str:
"Return the name of this database"
@@ -566,15 +569,15 @@ class DatabasePool:
retcols=["update_name"],
desc="check_background_updates",
)
- updates = [x["update_name"] for x in updates]
+ background_update_names = [x["update_name"] for x in updates]
for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items():
- if update_name not in updates:
+ if update_name not in background_update_names:
logger.debug("Now safe to upsert in %s", table)
self._unsafe_to_upsert_tables.discard(table)
# If there's any updates still running, reschedule to run.
- if updates:
+ if background_update_names:
self._clock.call_later(
15.0,
run_as_background_process,
@@ -646,9 +649,7 @@ class DatabasePool:
# For now, we just log an error, and hope that it works on the first attempt.
# TODO: raise an exception.
- # Type-ignore Mypy doesn't yet consider ParamSpec.args to be iterable; see
- # https://github.com/python/mypy/pull/12668
- for i, arg in enumerate(args): # type: ignore[arg-type, var-annotated]
+ for i, arg in enumerate(args):
if inspect.isgenerator(arg):
logger.error(
"Programming error: generator passed to new_transaction as "
@@ -656,9 +657,7 @@ class DatabasePool:
i,
func,
)
- # Type-ignore Mypy doesn't yet consider ParamSpec.args to be a mapping; see
- # https://github.com/python/mypy/pull/12668
- for name, val in kwargs.items(): # type: ignore[attr-defined]
+ for name, val in kwargs.items():
if inspect.isgenerator(val):
logger.error(
"Programming error: generator passed to new_transaction as "
@@ -1132,17 +1131,57 @@ class DatabasePool:
desc: str = "simple_upsert",
lock: bool = True,
) -> bool:
- """
+ """Insert a row with values + insertion_values; on conflict, update with values.
+
+ All of our supported databases accept the nonstandard "upsert" statement in
+ their dialect of SQL. We call this a "native upsert". The syntax looks roughly
+ like:
+
+ INSERT INTO table VALUES (values + insertion_values)
+ ON CONFLICT (keyvalues)
+ DO UPDATE SET (values); -- overwrite `values` columns only
+
+        If (values) is empty, the resulting query is slightly simpler:
+
+ INSERT INTO table VALUES (insertion_values)
+ ON CONFLICT (keyvalues)
+ DO NOTHING; -- do not overwrite any columns
+
+ This function is a helper to build such queries.
- `lock` should generally be set to True (the default), but can be set
- to False if either of the following are true:
- 1. there is a UNIQUE INDEX on the key columns. In this case a conflict
- will cause an IntegrityError in which case this function will retry
- the update.
- 2. we somehow know that we are the only thread which will be updating
- this table.
- As an additional note, this parameter only matters for old SQLite versions
- because we will use native upserts otherwise.
+ In order for upserts to make sense, the database must be able to determine when
+ an upsert CONFLICTs with an existing row. Postgres and SQLite ensure this by
+ requiring that a unique index exist on the column names used to detect a
+ conflict (i.e. `keyvalues.keys()`).
+
+ If there is no such index, we can "emulate" an upsert with a SELECT followed
+ by either an INSERT or an UPDATE. This is unsafe: we cannot make the same
+        atomicity guarantees that a native upsert can, and are very vulnerable to races
+ and crashes. Therefore if we wish to upsert without an appropriate unique index,
+ we must either:
+
+ 1. Acquire a table-level lock before the emulated upsert (`lock=True`), or
+ 2. VERY CAREFULLY ensure that we are the only thread and worker which will be
+ writing to this table, in which case we can proceed without a lock
+ (`lock=False`).
+
+ Generally speaking, you should use `lock=True`. If the table in question has a
+ unique index[*], this class will use a native upsert (which is atomic and so can
+ ignore the `lock` argument). Otherwise this class will use an emulated upsert,
+        in which case we want the safer option unless we have been VERY CAREFUL.
+
+ [*]: Some tables have unique indices added to them in the background. Those
+ tables `T` are keys in the dictionary UNIQUE_INDEX_BACKGROUND_UPDATES,
+ where `T` maps to the background update that adds a unique index to `T`.
+ This dictionary is maintained by hand.
+
+ At runtime, we constantly check to see if each of these background updates
+        has run. If so, we deem the corresponding table safe to upsert into, because
+        we can now use a native upsert to do so. If not, we deem the table unsafe
+ to upsert into and require an emulated upsert.
+
+ Tables that do not appear in this dictionary are assumed to have an
+ appropriate unique index and therefore be safe to upsert into.
Args:
table: The table to upsert into
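
# A minimal call-site sketch (not part of the diff above) for the behaviour the
# docstring describes: with a unique index on the key columns this becomes a single
# native INSERT ... ON CONFLICT, otherwise it falls back to the emulated
# SELECT-then-INSERT/UPDATE path, where `lock=True` keeps the emulation safe.
# The table and column names below are hypothetical; `store` is assumed to be a
# data store with a `db_pool`.

async def record_last_seen(store, user_id: str, device_id: str, ts: int) -> None:
    await store.db_pool.simple_upsert(
        table="device_last_seen",  # hypothetical table
        keyvalues={"user_id": user_id, "device_id": device_id},
        values={"last_seen_ts": ts},
        desc="record_last_seen",
        lock=True,  # only consulted when the emulated upsert is used
    )
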
@@ -1160,11 +1199,8 @@ class DatabasePool:
attempts = 0
while True:
try:
- # We can autocommit if we are going to use native upserts
- autocommit = (
- self.engine.can_native_upsert
- and table not in self._unsafe_to_upsert_tables
- )
+ # We can autocommit if it is safe to upsert
+ autocommit = table not in self._unsafe_to_upsert_tables
return await self.runInteraction(
desc,
@@ -1195,11 +1231,12 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
+ where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
Pick the UPSERT method which works best on the platform. Either the
- native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
+ native one (Pg9.5+, SQLite >= 3.24), or fall back to an emulated method.
Args:
txn: The transaction to use.
@@ -1207,16 +1244,23 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
- lock: True to lock the table when doing the upsert.
+ where_clause: An index predicate to apply to the upsert.
+ lock: True to lock the table when doing the upsert. Unused when performing
+ a native upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
not empty then this always returns True)
"""
insertion_values = insertion_values or {}
- if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
+ if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_txn_native_upsert(
- txn, table, keyvalues, values, insertion_values=insertion_values
+ txn,
+ table,
+ keyvalues,
+ values,
+ insertion_values=insertion_values,
+ where_clause=where_clause,
)
else:
return self.simple_upsert_txn_emulated(
@@ -1225,6 +1269,7 @@ class DatabasePool:
keyvalues,
values,
insertion_values=insertion_values,
+ where_clause=where_clause,
lock=lock,
)
@@ -1235,6 +1280,7 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
+ where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
@@ -1243,6 +1289,7 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
+ where_clause: An index predicate to apply to the upsert.
lock: True to lock the table when doing the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@@ -1262,14 +1309,17 @@ class DatabasePool:
else:
return "%s = ?" % (key,)
+ # Generate a where clause of each keyvalue and optionally the provided
+ # index predicate.
+ where = [_getwhere(k) for k in keyvalues]
+ if where_clause:
+ where.append(where_clause)
+
if not values:
# If `values` is empty, then all of the values we care about are in
# the unique key, so there is nothing to UPDATE. We can just do a
# SELECT instead to see if it exists.
- sql = "SELECT 1 FROM %s WHERE %s" % (
- table,
- " AND ".join(_getwhere(k) for k in keyvalues),
- )
+ sql = "SELECT 1 FROM %s WHERE %s" % (table, " AND ".join(where))
sqlargs = list(keyvalues.values())
txn.execute(sql, sqlargs)
if txn.fetchall():
@@ -1280,7 +1330,7 @@ class DatabasePool:
sql = "UPDATE %s SET %s WHERE %s" % (
table,
", ".join("%s = ?" % (k,) for k in values),
- " AND ".join(_getwhere(k) for k in keyvalues),
+ " AND ".join(where),
)
sqlargs = list(values.values()) + list(keyvalues.values())
@@ -1310,6 +1360,7 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
+ where_clause: Optional[str] = None,
) -> bool:
"""
Use the native UPSERT functionality in PostgreSQL.
@@ -1319,6 +1370,7 @@ class DatabasePool:
keyvalues: The unique key tables and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
+ where_clause: An index predicate to apply to the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@@ -1334,11 +1386,12 @@ class DatabasePool:
allvalues.update(values)
latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
- sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
+ sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues),
", ".join(k for k in keyvalues),
+ f"WHERE {where_clause}" if where_clause else "",
latter,
)
txn.execute(sql, list(allvalues.values()))
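
# A minimal sketch (not part of the diff above) of what the new `where_clause`
# argument is for: upserting against a *partial* unique index. The table and
# predicate are hypothetical; `store` is assumed to have a `db_pool` and `txn` is
# a LoggingTransaction. For a native upsert this renders roughly as
#   INSERT INTO ... ON CONFLICT (room_id, receipt_type, user_id)
#   WHERE thread_id IS NULL DO UPDATE SET event_id = EXCLUDED.event_id

def upsert_unthreaded_receipt_txn(store, txn, room_id: str, user_id: str, event_id: str) -> None:
    store.db_pool.simple_upsert_txn(
        txn,
        table="receipts_linearized",
        keyvalues={"room_id": room_id, "receipt_type": "m.read", "user_id": user_id},
        values={"event_id": event_id},
        where_clause="thread_id IS NULL",  # predicate of the (assumed) partial unique index
    )
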
@@ -1365,14 +1418,12 @@ class DatabasePool:
value_names: The value column names
value_values: A list of each row's value column values.
Ignored if value_names is empty.
- lock: True to lock the table when doing the upsert. Unused if the database engine
- supports native upserts.
+ lock: True to lock the table when doing the upsert. Unused when performing
+ a native upsert.
"""
- # We can autocommit if we are going to use native upserts
- autocommit = (
- self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables
- )
+        # We can autocommit if it is safe to upsert
+ autocommit = table not in self._unsafe_to_upsert_tables
await self.runInteraction(
desc,
@@ -1406,10 +1457,10 @@ class DatabasePool:
value_names: The value column names
value_values: A list of each row's value column values.
Ignored if value_names is empty.
- lock: True to lock the table when doing the upsert. Unused if the database engine
- supports native upserts.
+ lock: True to lock the table when doing the upsert. Unused when performing
+ a native upsert.
"""
- if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
+ if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_many_txn_native_upsert(
txn, table, key_names, key_values, value_names, value_values
)
@@ -1607,7 +1658,7 @@ class DatabasePool:
table: string giving the table name
keyvalues: dict of column names and values to select the row with
retcol: string giving the name of the column to return
- allow_none: If true, return None instead of failing if the SELECT
+ allow_none: If true, return None instead of raising StoreError if the SELECT
statement returns no rows
desc: description of the transaction, for logging and metrics
"""
@@ -2024,13 +2075,14 @@ class DatabasePool:
retcols: Collection[str],
allow_none: bool = False,
) -> Optional[Dict[str, Any]]:
- select_sql = "SELECT %s FROM %s WHERE %s" % (
- ", ".join(retcols),
- table,
- " AND ".join("%s = ?" % (k,) for k in keyvalues),
- )
+ select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table)
+
+ if keyvalues:
+ select_sql += " WHERE %s" % (" AND ".join("%s = ?" % k for k in keyvalues),)
+ txn.execute(select_sql, list(keyvalues.values()))
+ else:
+ txn.execute(select_sql)
- txn.execute(select_sql, list(keyvalues.values()))
row = txn.fetchone()
if not row:
@@ -2410,6 +2462,66 @@ def make_in_list_sql_clause(
return "%s IN (%s)" % (column, ",".join("?" for _ in iterable)), list(iterable)
+# These overloads ensure that `columns` and `iterable` values have the same length.
+# Suppress "Single overload definition, multiple required" complaint.
+@overload # type: ignore[misc]
+def make_tuple_in_list_sql_clause(
+ database_engine: BaseDatabaseEngine,
+ columns: Tuple[str, str],
+ iterable: Collection[Tuple[Any, Any]],
+) -> Tuple[str, list]:
+ ...
+
+
+def make_tuple_in_list_sql_clause(
+ database_engine: BaseDatabaseEngine,
+ columns: Tuple[str, ...],
+ iterable: Collection[Tuple[Any, ...]],
+) -> Tuple[str, list]:
+ """Returns an SQL clause that checks the given tuple of columns is in the iterable.
+
+ Args:
+ database_engine
+ columns: Names of the columns in the tuple.
+ iterable: The tuples to check the columns against.
+
+ Returns:
+ A tuple of SQL query and the args
+ """
+ if len(columns) == 0:
+ # Should be unreachable due to mypy, as long as the overloads are set up right.
+ if () in iterable:
+ return "TRUE", []
+ else:
+ return "FALSE", []
+
+ if len(columns) == 1:
+ # Use `= ANY(?)` on postgres.
+ return make_in_list_sql_clause(
+ database_engine, next(iter(columns)), [values[0] for values in iterable]
+ )
+
+ # There are multiple columns. Avoid using an `= ANY(?)` clause on postgres, as
+ # indices are not used when there are multiple columns. Instead, use an `IN`
+ # expression.
+ #
+ # `IN ((?, ...), ...)` with tuples is supported by postgres only, whereas
+ # `IN (VALUES (?, ...), ...)` is supported by both sqlite and postgres.
+ # Thus, the latter is chosen.
+
+ if len(iterable) == 0:
+ # A 0-length `VALUES` list is not allowed in sqlite or postgres.
+ # Also note that a 0-length `IN (...)` clause (not using `VALUES`) is not
+ # allowed in postgres.
+ return "FALSE", []
+
+ tuple_sql = "(%s)" % (",".join("?" for _ in columns),)
+ return "(%s) IN (VALUES %s)" % (
+ ",".join(column for column in columns),
+ ",".join(tuple_sql for _ in iterable),
+ ), [value for values in iterable for value in values]
+
+
KV = TypeVar("KV")
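
# A small sketch (not part of the diff above) of the clause the new helper
# produces for a two-column lookup. `engine` is assumed to be a
# BaseDatabaseEngine instance (e.g. the pool's `database_engine`).

from typing import Any, List, Tuple

from synapse.storage.database import make_tuple_in_list_sql_clause
from synapse.storage.engines import BaseDatabaseEngine


def example_device_clause(engine: BaseDatabaseEngine) -> Tuple[str, List[Any]]:
    clause, args = make_tuple_in_list_sql_clause(
        engine,
        ("user_id", "device_id"),
        [("@alice:example.org", "DEV1"), ("@bob:example.org", "DEV2")],
    )
    # clause == "(user_id,device_id) IN (VALUES (?,?),(?,?))"
    # args   == ["@alice:example.org", "DEV1", "@bob:example.org", "DEV2"]
    return clause, args
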
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 4dccbb732a..0e47592be3 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -26,9 +26,7 @@ from synapse.storage.database import (
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.types import Cursor
-from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.caches.stream_change_cache import StreamChangeCache
from .account_data import AccountDataStore
from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore
@@ -83,6 +81,7 @@ logger = logging.getLogger(__name__)
class DataStore(
EventsBackgroundUpdatesStore,
+ DeviceStore,
RoomMemberStore,
RoomStore,
RoomBatchStore,
@@ -114,7 +113,6 @@ class DataStore(
StreamWorkerStore,
OpenIdStore,
ClientIpWorkerStore,
- DeviceStore,
DeviceInboxStore,
UserDirectoryStore,
UserErasureStore,
@@ -138,41 +136,8 @@ class DataStore(
self._clock = hs.get_clock()
self.database_engine = database.engine
- self._device_list_id_gen = StreamIdGenerator(
- db_conn,
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ],
- )
-
super().__init__(database, db_conn, hs)
- events_max = self._stream_id_gen.get_current_token()
- curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
- db_conn,
- "current_state_delta_stream",
- entity_column="room_id",
- stream_column="stream_id",
- max_value=events_max, # As we share the stream id with events token
- limit=1000,
- )
- self._curr_state_delta_stream_cache = StreamChangeCache(
- "_curr_state_delta_stream_cache",
- min_curr_state_delta_id,
- prefilled_cache=curr_state_delta_prefill,
- )
-
- self._stream_order_on_start = self.get_room_max_stream_ordering()
- self._min_stream_order_on_start = self.get_room_min_stream_ordering()
-
- def get_device_stream_token(self) -> int:
- # TODO: shouldn't this be moved to `DeviceWorkerStore`?
- return self._device_list_id_gen.get_current_token()
-
async def get_users(self) -> List[JsonDict]:
"""Function to retrieve a list of users in users table.
@@ -201,8 +166,9 @@ class DataStore(
name: Optional[str] = None,
guests: bool = True,
deactivated: bool = False,
- order_by: str = UserSortOrder.USER_ID.value,
+ order_by: str = UserSortOrder.NAME.value,
direction: str = "f",
+ approved: bool = True,
) -> Tuple[List[JsonDict], int]:
"""Function to retrieve a paginated list of users from
users list. This will return a json list of users and the
@@ -217,6 +183,7 @@ class DataStore(
deactivated: whether to include deactivated users
order_by: the sort order of the returned list
direction: sort ascending or descending
+ approved: whether to include approved users
Returns:
A tuple of a list of mappings from user to information and a count of total users.
"""
@@ -249,11 +216,17 @@ class DataStore(
if not deactivated:
filters.append("deactivated = 0")
+ if not approved:
+ # We ignore NULL values for the approved flag because these should only
+            # We ignore NULL values for the approved flag because these should only
+            # be pre-existing users, which we consider as already approved.
+
where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
sql_base = f"""
FROM users as u
LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
+ LEFT JOIN erased_users AS eu ON u.name = eu.user_id
{where_clause}
"""
sql = "SELECT COUNT(*) as total_users " + sql_base
@@ -262,7 +235,8 @@ class DataStore(
sql = f"""
SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
- displayname, avatar_url, creation_ts * 1000 as creation_ts
+ displayname, avatar_url, creation_ts * 1000 as creation_ts, approved,
+ eu.user_id is not null as erased
{sql_base}
ORDER BY {order_by_column} {order}, u.name ASC
LIMIT ? OFFSET ?
@@ -270,6 +244,13 @@ class DataStore(
args += [limit, start]
txn.execute(sql, args)
users = self.db_pool.cursor_to_dict(txn)
+
+ # some of those boolean values are returned as integers when we're on SQLite
+ columns_to_boolify = ["erased"]
+ for user in users:
+ for column in columns_to_boolify:
+ user[column] = bool(user[column])
+
return users, count
return await self.db_pool.runInteraction(
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 9af9f4f18e..282687ebce 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -27,7 +27,6 @@ from typing import (
)
from synapse.api.constants import AccountDataTypes
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
from synapse.storage._base import db_to_json
from synapse.storage.database import (
@@ -68,12 +67,11 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
# to write account data. A value of `True` implies that `_account_data_id_gen`
# is an `AbstractStreamIdGenerator` and not just a tracker.
self._account_data_id_gen: AbstractStreamIdTracker
+ self._can_write_to_account_data = (
+ self._instance_name in hs.config.worker.writers.account_data
+ )
if isinstance(database.engine, PostgresEngine):
- self._can_write_to_account_data = (
- self._instance_name in hs.config.worker.writers.account_data
- )
-
self._account_data_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
@@ -95,21 +93,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
# `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
# updated over replication. (Multiple writers are not supported for
# SQLite).
- if self._instance_name in hs.config.worker.writers.account_data:
- self._can_write_to_account_data = True
- self._account_data_id_gen = StreamIdGenerator(
- db_conn,
- "room_account_data",
- "stream_id",
- extra_tables=[("room_tags_revisions", "stream_id")],
- )
- else:
- self._account_data_id_gen = SlavedIdTracker(
- db_conn,
- "room_account_data",
- "stream_id",
- extra_tables=[("room_tags_revisions", "stream_id")],
- )
+ self._account_data_id_gen = StreamIdGenerator(
+ db_conn,
+ "room_account_data",
+ "stream_id",
+ extra_tables=[("room_tags_revisions", "stream_id")],
+ is_writer=self._instance_name in hs.config.worker.writers.account_data,
+ )
account_max = self.get_max_account_data_stream_id()
self._account_data_stream_cache = StreamChangeCache(
@@ -650,9 +640,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
txn, self.get_account_data_for_room, (user_id,)
)
self._invalidate_cache_and_stream(txn, self.get_push_rules_for_user, (user_id,))
- self._invalidate_cache_and_stream(
- txn, self.get_push_rules_enabled_for_user, (user_id,)
- )
# This user might be contained in the ignored_by cache for other users,
# so we have to invalidate it all.
self._invalidate_all_cache_and_stream(txn, self.ignored_by)
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 64b70a7b28..63046c0527 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -157,10 +157,23 @@ class ApplicationServiceWorkerStore(RoomMemberWorkerStore):
app_service: "ApplicationService",
cache_context: _CacheContext,
) -> List[str]:
- users_in_room = await self.get_users_in_room(
+ """
+ Get all users in a room that the appservice controls.
+
+ Args:
+ room_id: The room to check in.
+ app_service: The application service to check interest/control against
+
+ Returns:
+ List of user IDs that the appservice controls.
+ """
+ # We can use `get_local_users_in_room(...)` here because an application service
+ # can only be interested in local users of the server it's on (ignore any remote
+ # users that might match the user namespace regex).
+ local_users_in_room = await self.get_local_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)
- return list(filter(app_service.is_interested_in_user, users_in_room))
+ return list(filter(app_service.is_interested_in_user, local_users_in_room))
class ApplicationServiceStore(ApplicationServiceWorkerStore):
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 12e9a42382..a58668a380 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -33,7 +33,7 @@ from synapse.storage.database import (
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
-from synapse.util.caches.descriptors import _CachedFunction
+from synapse.util.caches.descriptors import CachedFunction
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
@@ -205,6 +205,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self.get_rooms_for_user_with_stream_ordering.invalidate(
(data.state_key,)
)
+ self.get_rooms_for_user.invalidate((data.state_key,))
else:
raise Exception("Unknown events stream row type %s" % (row.type,))
@@ -223,15 +224,16 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
# process triggering the invalidation is responsible for clearing any external
# cached objects.
self._invalidate_local_get_event_cache(event_id)
- self.have_seen_event.invalidate((room_id, event_id))
- self.get_latest_event_ids_in_room.invalidate((room_id,))
-
- self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,))
+ self._attempt_to_invalidate_cache("have_seen_event", (room_id, event_id))
+ self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
+ self._attempt_to_invalidate_cache(
+ "get_unread_event_push_actions_by_room_for_user", (room_id,)
+ )
# The `_get_membership_from_event_id` is immutable, except for the
# case where we look up an event *before* persisting it.
- self._get_membership_from_event_id.invalidate((event_id,))
+ self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))
if not backfilled:
self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
@@ -240,19 +242,31 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._invalidate_local_get_event_cache(redacts)
# Caches which might leak edits must be invalidated for the event being
# redacted.
- self.get_relations_for_event.invalidate((redacts,))
- self.get_applicable_edit.invalidate((redacts,))
+ self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,))
+ self._attempt_to_invalidate_cache("get_applicable_edit", (redacts,))
+ self._attempt_to_invalidate_cache("get_thread_id", (redacts,))
+ self._attempt_to_invalidate_cache("get_thread_id_for_receipts", (redacts,))
if etype == EventTypes.Member:
self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
- self.get_invited_rooms_for_local_user.invalidate((state_key,))
+ self._attempt_to_invalidate_cache(
+ "get_invited_rooms_for_local_user", (state_key,)
+ )
+ self._attempt_to_invalidate_cache(
+ "get_rooms_for_user_with_stream_ordering", (state_key,)
+ )
+ self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))
if relates_to:
- self.get_relations_for_event.invalidate((relates_to,))
- self.get_aggregation_groups_for_event.invalidate((relates_to,))
- self.get_applicable_edit.invalidate((relates_to,))
- self.get_thread_summary.invalidate((relates_to,))
- self.get_thread_participated.invalidate((relates_to,))
+ self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
+ self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,))
+ self._attempt_to_invalidate_cache(
+ "get_aggregation_groups_for_event", (relates_to,)
+ )
+ self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,))
+ self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,))
+ self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,))
+ self._attempt_to_invalidate_cache("get_threads", (room_id,))
async def invalidate_cache_and_stream(
self, cache_name: str, keys: Tuple[Any, ...]
@@ -269,9 +283,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
return
cache_func.invalidate(keys)
- await self.db_pool.runInteraction(
- "invalidate_cache_and_stream",
- self._send_invalidation_to_replication,
+ await self.send_invalidation_to_replication(
cache_func.__name__,
keys,
)
@@ -279,7 +291,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
def _invalidate_cache_and_stream(
self,
txn: LoggingTransaction,
- cache_func: _CachedFunction,
+ cache_func: CachedFunction,
keys: Tuple[Any, ...],
) -> None:
"""Invalidates the cache and adds it to the cache stream so slaves
@@ -293,7 +305,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
def _invalidate_all_cache_and_stream(
- self, txn: LoggingTransaction, cache_func: _CachedFunction
+ self, txn: LoggingTransaction, cache_func: CachedFunction
) -> None:
"""Invalidates the entire cache and adds it to the cache stream so slaves
will know to invalidate their caches.
@@ -334,6 +346,16 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
txn, CURRENT_STATE_CACHE_NAME, [room_id]
)
+ async def send_invalidation_to_replication(
+ self, cache_name: str, keys: Optional[Collection[Any]]
+ ) -> None:
+ await self.db_pool.runInteraction(
+ "send_invalidation_to_replication",
+ self._send_invalidation_to_replication,
+ cache_name,
+ keys,
+ )
+
def _send_invalidation_to_replication(
self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]]
) -> None:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index ca0fe8c4be..05a193f889 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import abc
import logging
from typing import (
TYPE_CHECKING,
@@ -39,6 +38,7 @@ from synapse.logging.opentracing import (
whitelisted_homeserver,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
+from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
@@ -47,12 +47,19 @@ from synapse.storage.database import (
make_tuple_comparison_clause,
)
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.types import Cursor
+from synapse.storage.util.id_generators import (
+ AbstractStreamIdGenerator,
+ AbstractStreamIdTracker,
+ StreamIdGenerator,
+)
from synapse.types import JsonDict, get_verify_key_from_cross_signing_key
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
@@ -69,7 +76,7 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = (
BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes"
-class DeviceWorkerStore(EndToEndKeyWorkerStore):
+class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
def __init__(
self,
database: DatabasePool,
@@ -78,9 +85,23 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
):
super().__init__(database, db_conn, hs)
+ # In the worker store this is an ID tracker which we overwrite in the non-worker
+ # class below that is used on the main process.
+ self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ db_conn,
+ "device_lists_stream",
+ "stream_id",
+ extra_tables=[
+ ("user_signature_stream", "stream_id"),
+ ("device_lists_outbound_pokes", "stream_id"),
+ ("device_lists_changes_in_room", "stream_id"),
+ ],
+ is_writer=hs.config.worker.worker_app is None,
+ )
+
-        # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a
-        # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker).
- device_list_max = self._device_list_id_gen.get_current_token() # type: ignore[attr-defined]
+ device_list_max = self._device_list_id_gen.get_current_token()
device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict(
db_conn,
"device_lists_stream",
@@ -134,6 +155,39 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
self._prune_old_outbound_device_pokes, 60 * 60 * 1000
)
+ def process_replication_rows(
+ self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
+ ) -> None:
+ if stream_name == DeviceListsStream.NAME:
+ self._device_list_id_gen.advance(instance_name, token)
+ self._invalidate_caches_for_devices(token, rows)
+ elif stream_name == UserSignatureStream.NAME:
+ self._device_list_id_gen.advance(instance_name, token)
+ for row in rows:
+ self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
+ return super().process_replication_rows(stream_name, instance_name, token, rows)
+
+ def _invalidate_caches_for_devices(
+ self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
+ ) -> None:
+ for row in rows:
+ # The entities are either user IDs (starting with '@') whose devices
+ # have changed, or remote servers that we need to tell about
+ # changes.
+ if row.entity.startswith("@"):
+ self._device_list_stream_cache.entity_has_changed(row.entity, token)
+ self.get_cached_devices_for_user.invalidate((row.entity,))
+ self._get_cached_user_device.invalidate((row.entity,))
+ self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
+
+ else:
+ self._device_list_federation_stream_cache.entity_has_changed(
+ row.entity, token
+ )
+
+ def get_device_stream_token(self) -> int:
+ return self._device_list_id_gen.get_current_token()
+
async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int:
"""Retrieve number of all devices of given users.
Only returns number of devices that are not marked as hidden.
@@ -272,6 +326,13 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
destination, int(from_stream_id)
)
if not has_changed:
+ # debugging for https://github.com/matrix-org/synapse/issues/14251
+ issue_8631_logger.debug(
+ "%s: no change between %i and %i",
+ destination,
+ from_stream_id,
+ now_stream_id,
+ )
return now_stream_id, []
updates = await self.db_pool.runInteraction(
@@ -464,7 +525,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
limit: Maximum number of device updates to return
Returns:
- List: List of device update tuples:
+ List of device update tuples:
- user_id
- device_id
- stream_id
@@ -537,9 +598,11 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
"device_id": device_id,
"prev_id": [prev_id] if prev_id else [],
"stream_id": stream_id,
- "org.matrix.opentracing_context": opentracing_context,
}
+ if opentracing_context != "{}":
+ result["org.matrix.opentracing_context"] = opentracing_context
+
prev_id = stream_id
if device is not None:
@@ -547,7 +610,11 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
if keys:
result["keys"] = keys
- device_display_name = device.display_name
+ device_display_name = None
+ if (
+ self.hs.config.federation.allow_device_name_lookup_over_federation
+ ):
+ device_display_name = device.display_name
if device_display_name:
result["device_display_name"] = device_display_name
else:
@@ -662,12 +729,8 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
},
)
- @abc.abstractmethod
- def get_device_stream_token(self) -> int:
- """Get the current stream id from the _device_list_id_gen"""
- ...
-
@trace
+ @cancellable
async def get_user_devices_from_cache(
self, query_list: List[Tuple[str, Optional[str]]]
) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
@@ -743,6 +806,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
return self._device_list_stream_cache.get_all_entities_changed(from_key)
+ @cancellable
async def get_users_whose_devices_changed(
self,
from_key: int,
@@ -982,24 +1046,59 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
desc="mark_remote_user_device_cache_as_valid",
)
+ async def handle_potentially_left_users(self, user_ids: Set[str]) -> None:
+ """Given a set of remote users check if the server still shares a room with
+ them. If not then mark those users' device cache as stale.
+ """
+
+ if not user_ids:
+ return
+
+ await self.db_pool.runInteraction(
+ "_handle_potentially_left_users",
+ self.handle_potentially_left_users_txn,
+ user_ids,
+ )
+
+ def handle_potentially_left_users_txn(
+ self,
+ txn: LoggingTransaction,
+ user_ids: Set[str],
+ ) -> None:
+ """Given a set of remote users check if the server still shares a room with
+ them. If not then mark those users' device cache as stale.
+ """
+
+ if not user_ids:
+ return
+
+ joined_users = self.get_users_server_still_shares_room_with_txn(txn, user_ids)
+ left_users = user_ids - joined_users
+
+ for user_id in left_users:
+ self.mark_remote_user_device_list_as_unsubscribed_txn(txn, user_id)
+
async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None:
"""Mark that we no longer track device lists for remote user."""
- def _mark_remote_user_device_list_as_unsubscribed_txn(
- txn: LoggingTransaction,
- ) -> None:
- self.db_pool.simple_delete_txn(
- txn,
- table="device_lists_remote_extremeties",
- keyvalues={"user_id": user_id},
- )
- self._invalidate_cache_and_stream(
- txn, self.get_device_list_last_stream_id_for_remote, (user_id,)
- )
-
await self.db_pool.runInteraction(
"mark_remote_user_device_list_as_unsubscribed",
- _mark_remote_user_device_list_as_unsubscribed_txn,
+ self.mark_remote_user_device_list_as_unsubscribed_txn,
+ user_id,
+ )
+
+ def mark_remote_user_device_list_as_unsubscribed_txn(
+ self,
+ txn: LoggingTransaction,
+ user_id: str,
+ ) -> None:
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="device_lists_remote_extremeties",
+ keyvalues={"user_id": user_id},
+ )
+ self._invalidate_cache_and_stream(
+ txn, self.get_device_list_last_stream_id_for_remote, (user_id,)
)
async def get_dehydrated_device(
@@ -1221,6 +1320,7 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
desc="get_min_device_lists_changes_in_room",
)
+ @cancellable
async def get_device_list_changes_in_rooms(
self, room_ids: Collection[str], from_id: int
) -> Optional[Set[str]]:
@@ -1267,6 +1367,33 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
return changes
+ async def get_device_list_changes_in_room(
+ self, room_id: str, min_stream_id: int
+ ) -> Collection[Tuple[str, str]]:
+ """Get all device list changes that happened in the room since the given
+ stream ID.
+
+ Returns:
+ Collection of user ID/device ID tuples of all devices that have
+ changed
+ """
+
+ sql = """
+ SELECT DISTINCT user_id, device_id FROM device_lists_changes_in_room
+ WHERE room_id = ? AND stream_id > ?
+ """
+
+ def get_device_list_changes_in_room_txn(
+ txn: LoggingTransaction,
+ ) -> Collection[Tuple[str, str]]:
+ txn.execute(sql, (room_id, min_stream_id))
+ return cast(Collection[Tuple[str, str]], txn.fetchall())
+
+ return await self.db_pool.runInteraction(
+ "get_device_list_changes_in_room",
+ get_device_list_changes_in_room_txn,
+ )
+
class DeviceBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -1314,6 +1441,13 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
self._remove_duplicate_outbound_pokes,
)
+ self.db_pool.updates.register_background_index_update(
+ "device_lists_changes_in_room_by_room_index",
+ index_name="device_lists_changes_in_room_by_room_idx",
+ table="device_lists_changes_in_room",
+ columns=["room_id", "stream_id"],
+ )
+
async def _drop_device_list_streams_non_unique_indexes(
self, progress: JsonDict, batch_size: int
) -> int:
@@ -1401,6 +1535,10 @@ class DeviceBackgroundUpdateStore(SQLBaseStore):
class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
+ # Because we have write access, this will be a StreamIdGenerator
+ # (see DeviceWorkerStore.__init__)
+ _device_list_id_gen: AbstractStreamIdGenerator
+
def __init__(
self,
database: DatabasePool,
@@ -1725,7 +1863,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
context,
)
- async with self._device_list_id_gen.get_next_mult( # type: ignore[attr-defined]
+ async with self._device_list_id_gen.get_next_mult(
len(device_ids)
) as stream_ids:
await self.db_pool.runInteraction(
@@ -1775,7 +1913,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
self,
txn: LoggingTransaction,
user_id: str,
- device_ids: Iterable[str],
+ device_id: str,
hosts: Collection[str],
stream_ids: List[int],
context: Optional[Dict[str, str]],
@@ -1791,6 +1929,21 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
stream_id_iterator = iter(stream_ids)
encoded_context = json_encoder.encode(context)
+ mark_sent = not self.hs.is_mine_id(user_id)
+
+ values = [
+ (
+ destination,
+ next(stream_id_iterator),
+ user_id,
+ device_id,
+ mark_sent,
+ now,
+ encoded_context if whitelisted_homeserver(destination) else "{}",
+ )
+ for destination in hosts
+ ]
+
self.db_pool.simple_insert_many_txn(
txn,
table="device_lists_outbound_pokes",
@@ -1803,23 +1956,21 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
"ts",
"opentracing_context",
),
- values=[
- (
- destination,
- next(stream_id_iterator),
- user_id,
- device_id,
- not self.hs.is_mine_id(
- user_id
- ), # We only need to send out update for *our* users
- now,
- encoded_context if whitelisted_homeserver(destination) else "{}",
- )
- for destination in hosts
- for device_id in device_ids
- ],
+ values=values,
)
+ # debugging for https://github.com/matrix-org/synapse/issues/14251
+ if issue_8631_logger.isEnabledFor(logging.DEBUG):
+ issue_8631_logger.debug(
+ "Recorded outbound pokes for %s:%s with device stream ids %s",
+ user_id,
+ device_id,
+ {
+ stream_id: destination
+ for (destination, stream_id, _, _, _, _, _) in values
+ },
+ )
+
def _add_device_outbound_room_poke_txn(
self,
txn: LoggingTransaction,
@@ -1864,27 +2015,48 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
)
async def get_uncoverted_outbound_room_pokes(
- self, limit: int = 10
+ self, start_stream_id: int, start_room_id: str, limit: int = 10
) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
"""Get device list changes by room that have not yet been handled and
written to `device_lists_outbound_pokes`.
+ Args:
+ start_stream_id: Together with `start_room_id`, indicates the position after
+ which to return device list changes.
+ start_room_id: Together with `start_stream_id`, indicates the position after
+ which to return device list changes.
+ limit: The maximum number of device list changes to return.
+
Returns:
- A list of user ID, device ID, room ID, stream ID and optional opentracing context.
+ A list of user ID, device ID, room ID, stream ID and optional opentracing
+ context, in order of ascending (stream ID, room ID).
"""
sql = """
SELECT user_id, device_id, room_id, stream_id, opentracing_context
FROM device_lists_changes_in_room
- WHERE NOT converted_to_destinations
- ORDER BY stream_id
+ WHERE
+ (stream_id, room_id) > (?, ?) AND
+ stream_id <= ? AND
+ NOT converted_to_destinations
+ ORDER BY stream_id ASC, room_id ASC
LIMIT ?
"""
def get_uncoverted_outbound_room_pokes_txn(
txn: LoggingTransaction,
) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
- txn.execute(sql, (limit,))
+ txn.execute(
+ sql,
+ (
+ start_stream_id,
+ start_room_id,
+ # Avoid returning rows if there may be uncommitted device list
+ # changes with smaller stream IDs.
+ self._device_list_id_gen.get_current_token(),
+ limit,
+ ),
+ )
return [
(
@@ -1906,52 +2078,119 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id: str,
device_id: str,
room_id: str,
- stream_id: int,
hosts: Collection[str],
context: Optional[Dict[str, str]],
) -> None:
"""Queue the device update to be sent to the given set of hosts,
calculated from the room ID.
-
- Marks the associated row in `device_lists_changes_in_room` as handled.
"""
+ if not hosts:
+ return
def add_device_list_outbound_pokes_txn(
txn: LoggingTransaction, stream_ids: List[int]
) -> None:
- if hosts:
- self._add_device_outbound_poke_to_stream_txn(
- txn,
- user_id=user_id,
- device_ids=[device_id],
- hosts=hosts,
- stream_ids=stream_ids,
- context=context,
- )
-
- self.db_pool.simple_update_txn(
+ self._add_device_outbound_poke_to_stream_txn(
txn,
- table="device_lists_changes_in_room",
- keyvalues={
- "user_id": user_id,
- "device_id": device_id,
- "stream_id": stream_id,
- "room_id": room_id,
- },
- updatevalues={"converted_to_destinations": True},
+ user_id=user_id,
+ device_id=device_id,
+ hosts=hosts,
+ stream_ids=stream_ids,
+ context=context,
)
- if not hosts:
- # If there are no hosts then we don't try and generate stream IDs.
+ async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
return await self.db_pool.runInteraction(
"add_device_list_outbound_pokes",
add_device_list_outbound_pokes_txn,
- [],
+ stream_ids,
)
- async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: # type: ignore[attr-defined]
- return await self.db_pool.runInteraction(
- "add_device_list_outbound_pokes",
- add_device_list_outbound_pokes_txn,
- stream_ids,
+ async def add_remote_device_list_to_pending(
+ self, user_id: str, device_id: str
+ ) -> None:
+ """Add a device list update to the table tracking remote device list
+ updates during partial joins.
+ """
+
+ async with self._device_list_id_gen.get_next() as stream_id:
+ await self.db_pool.simple_upsert(
+ table="device_lists_remote_pending",
+ keyvalues={
+ "user_id": user_id,
+ "device_id": device_id,
+ },
+ values={"stream_id": stream_id},
+ desc="add_remote_device_list_to_pending",
)
+
+ async def get_pending_remote_device_list_updates_for_room(
+ self, room_id: str
+ ) -> Collection[Tuple[str, str]]:
+ """Get the set of remote device list updates from the pending table for
+ the room.
+ """
+
+ min_device_stream_id = await self.db_pool.simple_select_one_onecol(
+ table="partial_state_rooms",
+ keyvalues={
+ "room_id": room_id,
+ },
+ retcol="device_lists_stream_id",
+ desc="get_pending_remote_device_list_updates_for_room_device",
+ )
+
+ sql = """
+ SELECT user_id, device_id FROM device_lists_remote_pending AS d
+ INNER JOIN current_state_events AS c ON
+ type = 'm.room.member'
+ AND state_key = user_id
+ AND membership = 'join'
+ WHERE
+ room_id = ? AND stream_id > ?
+ """
+
+ def get_pending_remote_device_list_updates_for_room_txn(
+ txn: LoggingTransaction,
+ ) -> Collection[Tuple[str, str]]:
+ txn.execute(sql, (room_id, min_device_stream_id))
+ return cast(Collection[Tuple[str, str]], txn.fetchall())
+
+ return await self.db_pool.runInteraction(
+ "get_pending_remote_device_list_updates_for_room",
+ get_pending_remote_device_list_updates_for_room_txn,
+ )
+
+ async def get_device_change_last_converted_pos(self) -> Tuple[int, str]:
+ """
+ Get the position of the last row in `device_list_changes_in_room` that has been
+ converted to `device_lists_outbound_pokes`.
+
+ Rows with a strictly greater position where `converted_to_destinations` is
+ `FALSE` have not been converted.
+ """
+
+ row = await self.db_pool.simple_select_one(
+ table="device_lists_changes_converted_stream_position",
+ keyvalues={},
+ retcols=["stream_id", "room_id"],
+ desc="get_device_change_last_converted_pos",
+ )
+ return row["stream_id"], row["room_id"]
+
+ async def set_device_change_last_converted_pos(
+ self,
+ stream_id: int,
+ room_id: str,
+ ) -> None:
+ """
+ Set the position of the last row in `device_list_changes_in_room` that has been
+ converted to `device_lists_outbound_pokes`.
+ """
+
+ await self.db_pool.simple_update_one(
+ table="device_lists_changes_converted_stream_position",
+ keyvalues={},
+ updatevalues={"stream_id": stream_id, "room_id": room_id},
+ desc="set_device_change_last_converted_pos",
+ )
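
# A simplified sketch (not part of the diff above) of how the new cursor helpers
# and the re-shaped `get_uncoverted_outbound_room_pokes` might be driven together.
# The real conversion loop lives in a handler elsewhere in Synapse; here `store`
# is assumed to be the main DataStore and the host lookup is a stand-in.

async def convert_room_device_changes_once(store, limit: int = 100) -> None:
    stream_id, room_id = await store.get_device_change_last_converted_pos()

    rows = await store.get_uncoverted_outbound_room_pokes(stream_id, room_id, limit)
    if not rows:
        return

    for user_id, device_id, change_room_id, change_stream_id, context in rows:
        # Assumption: destinations are derived from the room's current state.
        hosts = await store.get_current_hosts_in_room(change_room_id)
        await store.add_device_list_outbound_pokes(
            user_id=user_id,
            device_id=device_id,
            room_id=change_room_id,
            hosts=hosts,
            context=context,
        )

    # Advance the cursor past the last row we converted.
    _, _, last_room_id, last_stream_id, _ = rows[-1]
    await store.set_device_change_last_converted_pos(last_stream_id, last_room_id)
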
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index af59be6b48..6240f9a75e 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -391,10 +391,10 @@ class EndToEndRoomKeyStore(SQLBaseStore):
Returns:
A dict giving the info metadata for this backup version, with
fields including:
- version(str)
- algorithm(str)
- auth_data(object): opaque dict supplied by the client
- etag(int): tag of the keys in the backup
+ version (str)
+ algorithm (str)
+ auth_data (object): opaque dict supplied by the client
+ etag (int): tag of the keys in the backup
"""
def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict:
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 46c0d06157..cf33e73e2b 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -43,6 +43,7 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
+ make_tuple_in_list_sql_clause,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
@@ -50,6 +51,7 @@ from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
@@ -135,12 +137,17 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
return now_stream_id, []
@trace
+ @cancellable
async def get_e2e_device_keys_for_cs_api(
- self, query_list: List[Tuple[str, Optional[str]]]
+ self,
+ query_list: List[Tuple[str, Optional[str]]],
+ include_displaynames: bool = True,
) -> Dict[str, Dict[str, JsonDict]]:
"""Fetch a list of device keys, formatted suitably for the C/S API.
Args:
- query_list(list): List of pairs of user_ids and device_ids.
+ query_list: List of pairs of user_ids and device_ids.
+ include_displaynames: Whether to include the displayname of returned devices
+ (if one exists).
Returns:
Dict mapping from user-id to dict mapping from device_id to
key data. The key data will be a dict in the same format as the
@@ -163,9 +170,12 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
continue
r["unsigned"] = {}
- display_name = device_info.display_name
- if display_name is not None:
- r["unsigned"]["device_display_name"] = display_name
+ if include_displaynames:
+ # Include the device's display name in the "unsigned" dictionary
+ display_name = device_info.display_name
+ if display_name is not None:
+ r["unsigned"]["device_display_name"] = display_name
+
rv[user_id][device_id] = r
return rv
@@ -197,6 +207,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
...
@trace
+ @cancellable
async def get_e2e_device_keys_and_signatures(
self,
query_list: Collection[Tuple[str, Optional[str]]],
@@ -275,7 +286,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
def _get_e2e_device_keys_txn(
self,
txn: LoggingTransaction,
- query_list: Collection[Tuple[str, str]],
+ query_list: Collection[Tuple[str, Optional[str]]],
include_all_devices: bool = False,
include_deleted_devices: bool = False,
) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]:
@@ -285,8 +296,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
cross-signing signatures which have been added subsequently (for which, see
get_e2e_device_keys_and_signatures)
"""
- query_clauses = []
- query_params = []
+ query_clauses: List[str] = []
+ query_params_list: List[List[object]] = []
if include_all_devices is False:
include_deleted_devices = False
@@ -294,40 +305,64 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
if include_deleted_devices:
deleted_devices = set(query_list)
+ # Split the query list into queries for users and queries for particular
+ # devices.
+ user_list = []
+ user_device_list = []
for (user_id, device_id) in query_list:
- query_clause = "user_id = ?"
- query_params.append(user_id)
-
- if device_id is not None:
- query_clause += " AND device_id = ?"
- query_params.append(device_id)
-
- query_clauses.append(query_clause)
-
- sql = (
- "SELECT user_id, device_id, "
- " d.display_name, "
- " k.key_json"
- " FROM devices d"
- " %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
- " WHERE %s AND NOT d.hidden"
- ) % (
- "LEFT" if include_all_devices else "INNER",
- " OR ".join("(" + q + ")" for q in query_clauses),
- )
+ if device_id is None:
+ user_list.append(user_id)
+ else:
+ user_device_list.append((user_id, device_id))
- txn.execute(sql, query_params)
+ if user_list:
+ user_id_in_list_clause, user_args = make_in_list_sql_clause(
+ txn.database_engine, "user_id", user_list
+ )
+ query_clauses.append(user_id_in_list_clause)
+ query_params_list.append(user_args)
+
+ if user_device_list:
+ # Divide the device queries into batches, to avoid excessively large
+ # queries.
+ for user_device_batch in batch_iter(user_device_list, 1024):
+ (
+ user_device_id_in_list_clause,
+ user_device_args,
+ ) = make_tuple_in_list_sql_clause(
+ txn.database_engine, ("user_id", "device_id"), user_device_batch
+ )
+ query_clauses.append(user_device_id_in_list_clause)
+ query_params_list.append(user_device_args)
result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {}
- for (user_id, device_id, display_name, key_json) in txn:
- if include_deleted_devices:
- deleted_devices.remove((user_id, device_id))
- result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
- display_name, db_to_json(key_json) if key_json else None
+ for query_clause, query_params in zip(query_clauses, query_params_list):
+ sql = (
+ "SELECT user_id, device_id, "
+ " d.display_name, "
+ " k.key_json"
+ " FROM devices d"
+ " %s JOIN e2e_device_keys_json k USING (user_id, device_id)"
+ " WHERE %s AND NOT d.hidden"
+ ) % (
+ "LEFT" if include_all_devices else "INNER",
+ query_clause,
)
+ txn.execute(sql, query_params)
+
+ for (user_id, device_id, display_name, key_json) in txn:
+ assert device_id is not None
+ if include_deleted_devices:
+ deleted_devices.remove((user_id, device_id))
+ result.setdefault(user_id, {})[device_id] = DeviceKeyLookupResult(
+ display_name, db_to_json(key_json) if key_json else None
+ )
+
if include_deleted_devices:
for user_id, device_id in deleted_devices:
+ if device_id is None:
+ continue
result.setdefault(user_id, {})[device_id] = None
return result
@@ -377,10 +412,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
"""Retrieve a number of one-time keys for a user
Args:
- user_id(str): id of user to get keys for
- device_id(str): id of device to get keys for
- key_ids(list[str]): list of key ids (excluding algorithm) to
- retrieve
+ user_id: id of user to get keys for
+ device_id: id of device to get keys for
+ key_ids: list of key ids (excluding algorithm) to retrieve
Returns:
A map from (algorithm, key_id) to json string for key
@@ -887,6 +921,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
return keys
+ @cancellable
async def get_e2e_cross_signing_keys_bulk(
self, user_ids: List[str], from_user_id: Optional[str] = None
) -> Dict[str, Optional[Dict[str, JsonDict]]]:
@@ -902,7 +937,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
keys were not found, either their user ID will not be in the dict,
or their user ID will map to None.
"""
-
result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
if from_user_id:
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index eec55b6478..309a4ba664 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import datetime
import itertools
import logging
from queue import Empty, PriorityQueue
@@ -33,6 +34,7 @@ from synapse.api.constants import MAX_DEPTH, EventTypes
from synapse.api.errors import StoreError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.events import EventBase, make_event_from_dict
+from synapse.logging.opentracing import tag_args, trace
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
@@ -42,11 +44,12 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
+from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
@@ -70,6 +73,30 @@ pdus_pruned_from_federation_queue = Counter(
logger = logging.getLogger(__name__)
+# Parameters controlling exponential backoff between backfill failures.
+# After the first failure to backfill, we wait 2 hours before trying again. If the
+# second attempt fails, we wait 4 hours before trying again. If the third attempt fails,
+# we wait 8 hours before trying again, ... and so on.
+#
+# Each successive backoff period is twice as long as the last. However we cap this
+# period at a maximum of 2^8 = 256 hours: a little over 10 days. (This is the smallest
+# power of 2 which yields a maximum backoff period of at least 7 days---which was the
+# original maximum backoff period.) Even when we hit this cap, we will continue to
+# make backfill attempts once every 10 days.
+BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS = 8
+BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS = int(
+ datetime.timedelta(hours=1).total_seconds() * 1000
+)
+
+# We need a cap on the power of 2 or else the backoff period
+# 2^N * (milliseconds per hour)
+# will overflow when calculated within the database. We ensure overflow does not occur
+# by checking that the largest backoff period fits in a 32-bit signed integer.
+_LONGEST_BACKOFF_PERIOD_MILLISECONDS = (
+ 2**BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS
+) * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS
+assert 0 < _LONGEST_BACKOFF_PERIOD_MILLISECONDS <= ((2**31) - 1)
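As a rough sketch of the retry schedule these constants imply (commentary only, not part of the patch; `next_retry_delay_ms` is a made-up helper name), the wait after `n` failed attempts doubles each time and is capped at 256 hours:

    import datetime

    MAX_DOUBLING_STEPS = 8  # mirrors BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS
    STEP_MS = int(datetime.timedelta(hours=1).total_seconds() * 1000)

    def next_retry_delay_ms(num_attempts: int) -> int:
        # Same value the SQL below computes with a left shift capped by LEAST/MIN.
        return (2 ** min(num_attempts, MAX_DOUBLING_STEPS)) * STEP_MS

    assert next_retry_delay_ms(1) == 2 * 3600 * 1000     # 2 hours after the first failure
    assert next_retry_delay_ms(3) == 8 * 3600 * 1000     # 8 hours after the third failure
    assert next_retry_delay_ms(50) == 256 * 3600 * 1000  # capped at 256 hours (~10.7 days)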
+
# All the info we need while iterating the DAG while backfilling
@attr.s(frozen=True, slots=True, auto_attribs=True)
@@ -126,6 +153,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
)
return await self.get_events_as_list(event_ids)
+ @trace
+ @tag_args
async def get_auth_chain_ids(
self,
room_id: str,
@@ -709,95 +738,264 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# Return all events where not all sets can reach them.
return {eid for eid, n in event_to_missing_sets.items() if n}
- async def get_oldest_event_ids_with_depth_in_room(
- self, room_id: str
+ @trace
+ @tag_args
+ async def get_backfill_points_in_room(
+ self,
+ room_id: str,
+ current_depth: int,
+ limit: int,
) -> List[Tuple[str, int]]:
- """Gets the oldest events(backwards extremities) in the room along with the
- aproximate depth.
-
- We use this function so that we can compare and see if someones current
- depth at their current scrollback is within pagination range of the
- event extremeties. If the current depth is close to the depth of given
- oldest event, we can trigger a backfill.
+ """
+ Get the backward extremities to backfill from in the room along with the
+ approximate depth.
+
+ Only returns events that are at a depth lower than or
+ equal to the `current_depth`. Sorted by depth, highest to lowest (descending)
+ so the closest events to the `current_depth` are first in the list.
+
+ We ignore extremities that are newer than the user's current scroll position
+ (ie, those with depth greater than `current_depth`) as:
+ 1. we don't really care about getting events that have happened
+ after our current position; and
+ 2. by the nature of paginating and scrolling back, we have likely
+ previously tried and failed to backfill from that extremity, so
+ to avoid getting "stuck" requesting the same backfill repeatedly
+ we drop those extremities.
Args:
room_id: Room where we want to find the oldest events
+ current_depth: The depth at the user's current scrollback position
+ limit: The max number of backfill points to return
Returns:
- List of (event_id, depth) tuples
+ List of (event_id, depth) tuples. Sorted by depth, highest to lowest
+ (descending) so the closest events to the `current_depth` are first
+ in the list.
"""
- def get_oldest_event_ids_with_depth_in_room_txn(
+ def get_backfill_points_in_room_txn(
txn: LoggingTransaction, room_id: str
) -> List[Tuple[str, int]]:
- # Assemble a dictionary with event_id -> depth for the oldest events
+ # Assemble a tuple lookup of event_id -> depth for the oldest events
# we know of in the room. Backwards extremeties are the oldest
# events we know of in the room but we only know of them because
- # some other event referenced them by prev_event and aren't peristed
- # in our database yet (meaning we don't know their depth
- # specifically). So we need to look for the aproximate depth from
+ # some other event referenced them by prev_event and aren't
+ # persisted in our database yet (meaning we don't know their depth
+ # specifically). So we need to look for the approximate depth from
# the events connected to the current backwards extremeties.
- sql = """
- SELECT b.event_id, MAX(e.depth) FROM events as e
+
+ if isinstance(self.database_engine, PostgresEngine):
+ least_function = "LEAST"
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ least_function = "MIN"
+ else:
+ raise RuntimeError("Unknown database engine")
+
+ sql = f"""
+ SELECT backward_extrem.event_id, event.depth FROM events AS event
/**
* Get the edge connections from the event_edges table
* so we can see whether this event's prev_events points
* to a backward extremity in the next join.
*/
- INNER JOIN event_edges as g
- ON g.event_id = e.event_id
+ INNER JOIN event_edges AS edge
+ ON edge.event_id = event.event_id
/**
* We find the "oldest" events in the room by looking for
* events connected to backwards extremeties (oldest events
* in the room that we know of so far).
*/
- INNER JOIN event_backward_extremities as b
- ON g.prev_event_id = b.event_id
- WHERE b.room_id = ? AND g.is_state is ?
- GROUP BY b.event_id
+ INNER JOIN event_backward_extremities AS backward_extrem
+ ON edge.prev_event_id = backward_extrem.event_id
+ /**
+ * We use this info to make sure we don't retry to use a backfill point
+ * if we've already attempted to backfill from it recently.
+ */
+ LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info
+ ON
+ failed_backfill_attempt_info.room_id = backward_extrem.room_id
+ AND failed_backfill_attempt_info.event_id = backward_extrem.event_id
+ WHERE
+ backward_extrem.room_id = ?
+ /* We only care about non-state edges because we used to use
+ * `event_edges` for two different sorts of "edges" (the current
+ * event DAG, but also a link to the previous state, for state
+ * events). These legacy state event edges can be distinguished by
+ * `is_state` and are removed from the codebase and schema but
+ * because the schema change is in a background update, it's not
+ * necessarily safe to assume that it will have been completed.
+ */
+ AND edge.is_state is ? /* False */
+ /**
+ * We only want backwards extremities that are older than or at
+ * the same position as the given `current_depth` (where older
+ * means less than the given depth) because we're looking backwards
+ * from the `current_depth` when backfilling.
+ *
+ * current_depth (ignore events that come after this, ignore 2-4)
+ * |
+ * ▼
+ * <oldest-in-time> [0]<--[1]<--[2]<--[3]<--[4] <newest-in-time>
+ */
+ AND event.depth <= ? /* current_depth */
+ /**
+ * Exponential back-off (up to the upper bound) so we don't retry the
+ * same backfill point over and over. ex. 2hr, 4hr, 8hr, 16hr, etc.
+ *
+ * We use `1 << n` as a power of 2 equivalent for compatibility
+ * with older SQLites. The left shift equivalent only works with
+ * powers of 2 because left shift is a binary operation (base-2).
+ * Otherwise, we would use `power(2, n)` or the power operator, `2^n`.
+ */
+ AND (
+ failed_backfill_attempt_info.event_id IS NULL
+ OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + (
+ (1 << {least_function}(failed_backfill_attempt_info.num_attempts, ? /* max doubling steps */))
+ * ? /* step */
+ )
+ )
+ /**
+ * Sort from highest (closest to the `current_depth`) to the lowest depth
+ * because the closest are most relevant to backfill from first.
+ * Then tie-break on alphabetical order of the event_ids so we get a
+ * consistent ordering which is nice when asserting things in tests.
+ */
+ ORDER BY event.depth DESC, backward_extrem.event_id DESC
+ LIMIT ?
"""
- txn.execute(sql, (room_id, False))
+ txn.execute(
+ sql,
+ (
+ room_id,
+ False,
+ current_depth,
+ self._clock.time_msec(),
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
+ limit,
+ ),
+ )
return cast(List[Tuple[str, int]], txn.fetchall())
return await self.db_pool.runInteraction(
- "get_oldest_event_ids_with_depth_in_room",
- get_oldest_event_ids_with_depth_in_room_txn,
+ "get_backfill_points_in_room",
+ get_backfill_points_in_room_txn,
room_id,
)
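Viewed purely as data (an illustrative sketch with made-up event IDs, not how the storage layer actually runs the query), the depth filter and ordering that `get_backfill_points_in_room` applies amount to:

    extremities = [("$ev_a", 5), ("$ev_b", 9), ("$ev_c", 3)]  # (event_id, approximate depth)
    current_depth = 6

    backfill_points = sorted(
        ((ev, depth) for ev, depth in extremities if depth <= current_depth),
        key=lambda pair: (pair[1], pair[0]),
        reverse=True,
    )
    # "$ev_b" is newer than current_depth, so it is dropped; the rest are sorted
    # by depth descending with event_id as the tie-breaker.
    assert backfill_points == [("$ev_a", 5), ("$ev_c", 3)]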
+ @trace
async def get_insertion_event_backward_extremities_in_room(
- self, room_id: str
+ self,
+ room_id: str,
+ current_depth: int,
+ limit: int,
) -> List[Tuple[str, int]]:
- """Get the insertion events we know about that we haven't backfilled yet.
-
- We use this function so that we can compare and see if someones current
- depth at their current scrollback is within pagination range of the
- insertion event. If the current depth is close to the depth of given
- insertion event, we can trigger a backfill.
+ """
+ Get the insertion events we know about that we haven't backfilled yet
+ along with the approximate depth. Only returns insertion events that are
+ at a depth lower than or equal to the `current_depth`. Sorted by depth,
+ highest to lowest (descending) so the closest events to the
+ `current_depth` are first in the list.
+
+ We ignore insertion events that are newer than the user's current scroll
+ position (ie, those with depth greater than `current_depth`) as:
+ 1. we don't really care about getting events that have happened
+ after our current position; and
+ 2. by the nature of paginating and scrolling back, we have likely
+ previously tried and failed to backfill from that insertion event, so
+ to avoid getting "stuck" requesting the same backfill repeatedly
+ we drop those insertion events.
Args:
room_id: Room where we want to find the oldest events
+ current_depth: The depth at the user's current scrollback position
+ limit: The max number of insertion event extremities to return
Returns:
- List of (event_id, depth) tuples
+ List of (event_id, depth) tuples. Sorted by depth, highest to lowest
+ (descending) so the closest events to the `current_depth` are first
+ in the list.
"""
def get_insertion_event_backward_extremities_in_room_txn(
txn: LoggingTransaction, room_id: str
) -> List[Tuple[str, int]]:
- sql = """
- SELECT b.event_id, MAX(e.depth) FROM insertion_events as i
+ if isinstance(self.database_engine, PostgresEngine):
+ least_function = "LEAST"
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ least_function = "MIN"
+ else:
+ raise RuntimeError("Unknown database engine")
+
+ sql = f"""
+ SELECT
+ insertion_event_extremity.event_id, event.depth
/* We only want insertion events that are also marked as backwards extremities */
- INNER JOIN insertion_event_extremities as b USING (event_id)
+ FROM insertion_event_extremities AS insertion_event_extremity
/* Get the depth of the insertion event from the events table */
- INNER JOIN events AS e USING (event_id)
- WHERE b.room_id = ?
- GROUP BY b.event_id
+ INNER JOIN events AS event USING (event_id)
+ /**
+ * We use this info to make sure we don't retry to use a backfill point
+ * if we've already attempted to backfill from it recently.
+ */
+ LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info
+ ON
+ failed_backfill_attempt_info.room_id = insertion_event_extremity.room_id
+ AND failed_backfill_attempt_info.event_id = insertion_event_extremity.event_id
+ WHERE
+ insertion_event_extremity.room_id = ?
+ /**
+ * We only want extremities that are older than or at
+ * the same position as the given `current_depth` (where older
+ * means less than the given depth) because we're looking backwards
+ * from the `current_depth` when backfilling.
+ *
+ * current_depth (ignore events that come after this, ignore 2-4)
+ * |
+ * ▼
+ * <oldest-in-time> [0]<--[1]<--[2]<--[3]<--[4] <newest-in-time>
+ */
+ AND event.depth <= ? /* current_depth */
+ /**
+ * Exponential back-off (up to the upper bound) so we don't retry the
+ * same backfill point over and over. ex. 2hr, 4hr, 8hr, 16hr, etc
+ *
+ * We use `1 << n` as a power of 2 equivalent for compatibility
+ * with older SQLites. The left shift equivalent only works with
+ * powers of 2 because left shift is a binary operation (base-2).
+ * Otherwise, we would use `power(2, n)` or the power operator, `2^n`.
+ */
+ AND (
+ failed_backfill_attempt_info.event_id IS NULL
+ OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + (
+ (1 << {least_function}(failed_backfill_attempt_info.num_attempts, ? /* max doubling steps */))
+ * ? /* step */
+ )
+ )
+ /**
+ * Sort from highest (closest to the `current_depth`) to the lowest depth
+ * because the closest are most relevant to backfill from first.
+ * Then tie-break on alphabetical order of the event_ids so we get a
+ * consistent ordering which is nice when asserting things in tests.
+ */
+ ORDER BY event.depth DESC, insertion_event_extremity.event_id DESC
+ LIMIT ?
"""
- txn.execute(sql, (room_id,))
+ txn.execute(
+ sql,
+ (
+ room_id,
+ current_depth,
+ self._clock.time_msec(),
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
+ limit,
+ ),
+ )
return cast(List[Tuple[str, int]], txn.fetchall())
return await self.db_pool.runInteraction(
@@ -970,6 +1168,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
return int(min_depth) if min_depth is not None else None
+ @cancellable
async def get_forward_extremities_for_room_at_stream_ordering(
self, room_id: str, stream_ordering: int
) -> List[str]:
@@ -1286,6 +1485,105 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
return event_id_results
+ @trace
+ async def record_event_failed_pull_attempt(
+ self, room_id: str, event_id: str, cause: str
+ ) -> None:
+ """
+ Record when we fail to pull an event over federation.
+
+ This information allows us to be more intelligent when we decide to
+ retry (we don't need to fail over and over) and we can process that
+ event in the background so we don't block on it each time.
+
+ Args:
+ room_id: The room where the event failed to pull from
+ event_id: The event that failed to be fetched or processed
+ cause: The error message or reason that we failed to pull the event
+ """
+ logger.debug(
+ "record_event_failed_pull_attempt room_id=%s, event_id=%s, cause=%s",
+ room_id,
+ event_id,
+ cause,
+ )
+ await self.db_pool.runInteraction(
+ "record_event_failed_pull_attempt",
+ self._record_event_failed_pull_attempt_upsert_txn,
+ room_id,
+ event_id,
+ cause,
+ db_autocommit=True, # Safe as it's a single upsert
+ )
+
+ def _record_event_failed_pull_attempt_upsert_txn(
+ self,
+ txn: LoggingTransaction,
+ room_id: str,
+ event_id: str,
+ cause: str,
+ ) -> None:
+ sql = """
+ INSERT INTO event_failed_pull_attempts (
+ room_id, event_id, num_attempts, last_attempt_ts, last_cause
+ )
+ VALUES (?, ?, ?, ?, ?)
+ ON CONFLICT (room_id, event_id) DO UPDATE SET
+ num_attempts=event_failed_pull_attempts.num_attempts + 1,
+ last_attempt_ts=EXCLUDED.last_attempt_ts,
+ last_cause=EXCLUDED.last_cause;
+ """
+
+ txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))
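A minimal sketch of the upsert's effective semantics (an in-memory model for this commentary only, not part of the patch): the first failure for a (room, event) pair creates a row with `num_attempts = 1`, and every later failure increments the counter and refreshes the timestamp and cause.

    from typing import Dict, Tuple

    attempts: Dict[Tuple[str, str], dict] = {}

    def record_failure(room_id: str, event_id: str, now_ms: int, cause: str) -> None:
        row = attempts.setdefault(
            (room_id, event_id),
            {"num_attempts": 0, "last_attempt_ts": 0, "last_cause": ""},
        )
        row["num_attempts"] += 1
        row["last_attempt_ts"] = now_ms
        row["last_cause"] = cause

    record_failure("!room:example.org", "$event", 1_000, "timeout")
    record_failure("!room:example.org", "$event", 2_000, "404 from origin")
    assert attempts[("!room:example.org", "$event")]["num_attempts"] == 2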
+
+ @trace
+ async def get_event_ids_to_not_pull_from_backoff(
+ self,
+ room_id: str,
+ event_ids: Collection[str],
+ ) -> List[str]:
+ """
+ Filter down the events to ones that we've recently failed to pull. Uses
+ exponential backoff.
+
+ Args:
+ room_id: The room that the events belong to
+ event_ids: A list of events to filter down
+
+ Returns:
+ List of event_ids that should not be attempted to be pulled
+ """
+ event_failed_pull_attempts = await self.db_pool.simple_select_many_batch(
+ table="event_failed_pull_attempts",
+ column="event_id",
+ iterable=event_ids,
+ keyvalues={},
+ retcols=(
+ "event_id",
+ "last_attempt_ts",
+ "num_attempts",
+ ),
+ desc="get_event_ids_to_not_pull_from_backoff",
+ )
+
+ current_time = self._clock.time_msec()
+ return [
+ event_failed_pull_attempt["event_id"]
+ for event_failed_pull_attempt in event_failed_pull_attempts
+ # Exponential back-off (up to the upper bound) so we don't try to
+ # pull the same event over and over. ex. 2hr, 4hr, 8hr, 16hr, etc.
+ if current_time
+ < event_failed_pull_attempt["last_attempt_ts"]
+ + (
+ 2
+ ** min(
+ event_failed_pull_attempt["num_attempts"],
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
+ )
+ )
+ * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS
+ ]
+
async def get_missing_events(
self,
room_id: str,
@@ -1339,6 +1637,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
event_results.reverse()
return event_results
+ @trace
+ @tag_args
async def get_successor_events(self, event_id: str) -> List[str]:
"""Fetch all events that have the given event as a prev event
@@ -1375,6 +1675,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
_delete_old_forward_extrem_cache_txn,
)
+ @trace
async def insert_insertion_extremity(self, event_id: str, room_id: str) -> None:
await self.db_pool.simple_upsert(
table="insertion_event_extremities",
@@ -1483,7 +1784,12 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
self,
room_id: str,
) -> Optional[Tuple[str, str]]:
- """Get the next event ID in the staging area for the given room."""
+ """
+ Get the next event ID in the staging area for the given room.
+
+ Returns:
+ Tuple of the `origin` and `event_id`
+ """
def _get_next_staged_event_id_for_room_txn(
txn: LoggingTransaction,
@@ -1597,7 +1903,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
logger.info("Invalid prev_events for %s", event_id)
continue
- if room_version.event_format == EventFormatVersions.V1:
+ if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
for prev_event_tuple in prev_events:
if (
not isinstance(prev_event_tuple, list)
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index dd2627037c..b283ab0f9c 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -12,14 +12,85 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+"""Responsible for storing and fetching push actions / notifications.
+
+There are two main uses for push actions:
+ 1. Sending out push to a user's device; and
+ 2. Tracking per-room per-user notification counts (used in sync requests).
+
+For the former we simply use the `event_push_actions` table, which contains all
+the calculated actions for a given user (which were calculated by the
+`BulkPushRuleEvaluator`).
+
+For the latter we could simply count the number of rows in the
+`event_push_actions` table for a given room/user, but in practice this is
+*very* heavyweight when there are a large number of notifications (due to
+e.g. the user never reading a
+room). Plus, keeping all push actions indefinitely uses a lot of disk space.
+
+To fix these issues, we add a new table `event_push_summary` that tracks
+per-user per-room counts of all notifications that happened before a stream
+ordering S. Thus, to get the notification count for a user / room we can simply
+query a single row in `event_push_summary` and count the number of rows in
+`event_push_actions` with a stream ordering larger than S (and as long as S is
+"recent", the number of rows needing to be scanned will be small).
+
+The `event_push_summary` table is updated via a background job that periodically
+chooses a new stream ordering S' (usually the latest stream ordering), counts
+all notifications in `event_push_actions` between the existing S and S', and
+adds them to the existing counts in `event_push_summary`.
+
+This allows us to delete old rows from `event_push_actions` once those rows have
+been counted and added to `event_push_summary` (we call this process
+"rotation").
+
+
+We need to handle when a user sends a read receipt to the room. Again this is
+done as a background process. For each receipt we clear the row in
+`event_push_summary` and count the number of notifications in
+`event_push_actions` that happened after the receipt but before S, and insert
+that count into `event_push_summary`. (If the receipt happened *after* S then we
+simply clear the `event_push_summary`.)
+
+Note that it's possible that if the read receipt is for an old event the relevant
+`event_push_actions` rows will have been rotated and we get the wrong count
+(it'll be too low). We accept this as a rare edge case that is unlikely to
+impact the user much (since the vast majority of read receipts will be for the
+latest event).
+
+The last complication is handling the race where we request the notification
+counts after a user sends a read receipt into the room, but *before* the
+background update handles the receipt (without any special handling the counts
+would be outdated). We fix this by including in `event_push_summary` the read
+receipt we used when updating `event_push_summary`, and every time we query the
+table we check if that matches the most recent read receipt in the room. If it
+does, we continue as above; if not, we simply query the `event_push_actions`
+table directly.
+
+Since read receipts are almost always for recent events, scanning the
+`event_push_actions` table in this case is unlikely to be a problem. Even if it
+is a problem, it is temporary until the background job handles the new read
+receipt.
+"""
+
import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast
+from typing import (
+ TYPE_CHECKING,
+ Collection,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
import attr
-from synapse.api.constants import ReceiptTypes
+from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes
from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -27,6 +98,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
+from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -47,6 +119,32 @@ DEFAULT_HIGHLIGHT_ACTION: List[Union[dict, str]] = [
]
+@attr.s(slots=True, auto_attribs=True)
+class _RoomReceipt:
+ """
+ The stream orderings of a user's latest read receipts in a room: the
+ unthreaded receipt, plus a map of thread ID to threaded receipt.
+ """
+
+ unthreaded_stream_ordering: int = 0
+ # threaded_stream_ordering includes the main pseudo-thread.
+ threaded_stream_ordering: Dict[str, int] = attr.Factory(dict)
+
+ def is_unread(self, thread_id: str, stream_ordering: int) -> bool:
+ """Returns True if the stream ordering is unread according to the receipt information."""
+
+ # Only include push actions with a stream ordering after both the unthreaded
+ # and threaded receipt. Properly handles a user without any receipts present.
+ return (
+ self.unthreaded_stream_ordering < stream_ordering
+ and self.threaded_stream_ordering.get(thread_id, 0) < stream_ordering
+ )
+
+
+# A _RoomReceipt with no receipts in it.
+MISSING_ROOM_RECEIPT = _RoomReceipt()
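For example (illustrative values; the thread ID is made up), a push action only counts as unread if it is newer than both the room's unthreaded receipt and the receipt for its own thread:

    receipt = _RoomReceipt(
        unthreaded_stream_ordering=10,
        threaded_stream_ordering={"$thread_root": 20},
    )
    assert receipt.is_unread("$thread_root", 25)      # newer than both receipts
    assert not receipt.is_unread("$thread_root", 15)  # older than the threaded receipt
    assert receipt.is_unread("main", 15)              # no threaded receipt recorded for "main"
    assert MISSING_ROOM_RECEIPT.is_unread("main", 1)  # no receipts at all: everything is unread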
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpPushAction:
"""
@@ -85,7 +183,7 @@ class UserPushAction(EmailPushAction):
@attr.s(slots=True, auto_attribs=True)
class NotifCounts:
"""
- The per-user, per-room count of notifications. Used by sync and push.
+ The per-user, per-room, per-thread count of notifications. Used by sync and push.
"""
notify_count: int = 0
@@ -93,7 +191,24 @@ class NotifCounts:
highlight_count: int = 0
-def _serialize_action(actions: List[Union[dict, str]], is_highlight: bool) -> str:
+@attr.s(slots=True, auto_attribs=True)
+class RoomNotifCounts:
+ """
+ The per-user, per-room count of notifications. Used by sync and push.
+ """
+
+ main_timeline: NotifCounts
+ # Map of thread ID to the notification counts.
+ threads: Dict[str, NotifCounts]
+
+ def __len__(self) -> int:
+ # To properly account for the amount of space in any caches.
+ return len(self.threads) + 1
+
+
+def _serialize_action(
+ actions: Collection[Union[Mapping, str]], is_highlight: bool
+) -> str:
"""Custom serializer for actions. This allows us to "compress" common actions.
We use the fact that most users have the same actions for notifs (and for
@@ -131,6 +246,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
):
super().__init__(database, db_conn, hs)
+ # Track when the process started.
+ self._started_ts = self._clock.time_msec()
+
# These get correctly set by _find_stream_orderings_for_times_txn
self.stream_ordering_month_ago: Optional[int] = None
self.stream_ordering_day_ago: Optional[int] = None
@@ -150,6 +268,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
self._rotate_notifs, 30 * 1000
)
+ self._clear_old_staging_loop = self._clock.looping_call(
+ self._clear_old_push_actions_staging, 30 * 60 * 1000
+ )
+
self.db_pool.updates.register_background_index_update(
"event_push_summary_unique_index",
index_name="event_push_summary_unique_index",
@@ -159,14 +281,196 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
replaces_index="event_push_summary_user_rm",
)
- @cached(tree=True, max_entries=5000)
+ self.db_pool.updates.register_background_index_update(
+ "event_push_summary_unique_index2",
+ index_name="event_push_summary_unique_index2",
+ table="event_push_summary",
+ columns=["user_id", "room_id", "thread_id"],
+ unique=True,
+ )
+
+ self.db_pool.updates.register_background_update_handler(
+ "event_push_backfill_thread_id",
+ self._background_backfill_thread_id,
+ )
+
+ # Indexes which will be used to quickly make the thread_id column non-null.
+ self.db_pool.updates.register_background_index_update(
+ "event_push_actions_thread_id_null",
+ index_name="event_push_actions_thread_id_null",
+ table="event_push_actions",
+ columns=["thread_id"],
+ where_clause="thread_id IS NULL",
+ )
+ self.db_pool.updates.register_background_index_update(
+ "event_push_summary_thread_id_null",
+ index_name="event_push_summary_thread_id_null",
+ table="event_push_summary",
+ columns=["thread_id"],
+ where_clause="thread_id IS NULL",
+ )
+
+ # Check ASAP (and then every 15s until done) to see if we have finished the
+ # background updates for the event_push_actions and event_push_summary tables.
+ self._clock.call_later(0.0, self._check_event_push_backfill_thread_id)
+ self._event_push_backfill_thread_id_done = False
+
+ @wrap_as_background_process("check_event_push_backfill_thread_id")
+ async def _check_event_push_backfill_thread_id(self) -> None:
+ """
+ Has thread_id finished backfilling?
+
+ If not, we need to just-in-time update it so the queries work.
+ """
+ done = await self.db_pool.updates.has_completed_background_update(
+ "event_push_backfill_thread_id"
+ )
+
+ if done:
+ self._event_push_backfill_thread_id_done = True
+ else:
+ # Reschedule to run.
+ self._clock.call_later(15.0, self._check_event_push_backfill_thread_id)
+
+ async def _background_backfill_thread_id(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """
+ Fill in the thread_id field for event_push_actions and event_push_summary.
+
+ This is preparatory so that it can be made non-nullable in the future.
+
+ Because all current (null) data was written in an unthreaded manner, this
+ simply assumes it is on the "main" timeline. Since event_push_actions
+ are periodically cleared it is not possible to correctly re-calculate
+ the thread_id.
+ """
+ event_push_actions_done = progress.get("event_push_actions_done", False)
+
+ def add_thread_id_txn(
+ txn: LoggingTransaction, start_stream_ordering: int
+ ) -> int:
+ sql = """
+ SELECT stream_ordering
+ FROM event_push_actions
+ WHERE
+ thread_id IS NULL
+ AND stream_ordering > ?
+ ORDER BY stream_ordering
+ LIMIT ?
+ """
+ txn.execute(sql, (start_stream_ordering, batch_size))
+
+ # No more rows to process.
+ rows = txn.fetchall()
+ if not rows:
+ progress["event_push_actions_done"] = True
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "event_push_backfill_thread_id", progress
+ )
+ return 0
+
+ # Update the thread ID for any of those rows.
+ max_stream_ordering = rows[-1][0]
+
+ sql = """
+ UPDATE event_push_actions
+ SET thread_id = 'main'
+ WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
+ """
+ txn.execute(
+ sql,
+ (
+ start_stream_ordering,
+ max_stream_ordering,
+ ),
+ )
+
+ # Update progress.
+ processed_rows = txn.rowcount
+ progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "event_push_backfill_thread_id", progress
+ )
+
+ return processed_rows
+
+ def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
+ min_user_id = progress.get("max_summary_user_id", "")
+ min_room_id = progress.get("max_summary_room_id", "")
+
+ # Slightly overcomplicated query for getting the Nth user ID / room
+ # ID tuple, or the last one if there are fewer than N remaining.
+ sql = """
+ SELECT user_id, room_id FROM (
+ SELECT user_id, room_id FROM event_push_summary
+ WHERE (user_id, room_id) > (?, ?)
+ AND thread_id IS NULL
+ ORDER BY user_id, room_id
+ LIMIT ?
+ ) AS e
+ ORDER BY user_id DESC, room_id DESC
+ LIMIT 1
+ """
+
+ txn.execute(sql, (min_user_id, min_room_id, batch_size))
+ row = txn.fetchone()
+ if not row:
+ return 0
+
+ max_user_id, max_room_id = row
+
+ sql = """
+ UPDATE event_push_summary
+ SET thread_id = 'main'
+ WHERE
+ (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
+ AND thread_id IS NULL
+ """
+ txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
+ processed_rows = txn.rowcount
+
+ progress["max_summary_user_id"] = max_user_id
+ progress["max_summary_room_id"] = max_room_id
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "event_push_backfill_thread_id", progress
+ )
+
+ return processed_rows
+
+ # First update the event_push_actions table, then the event_push_summary table.
+ #
+ # Note that the event_push_actions_staging table is ignored since it is
+ # assumed that items in that table will only exist for a short period of
+ # time.
+ if not event_push_actions_done:
+ result = await self.db_pool.runInteraction(
+ "event_push_backfill_thread_id",
+ add_thread_id_txn,
+ progress.get("max_event_push_actions_stream_ordering", 0),
+ )
+ else:
+ result = await self.db_pool.runInteraction(
+ "event_push_backfill_thread_id",
+ add_thread_id_summary_txn,
+ )
+
+ # Only done after the event_push_summary table is done.
+ if not result:
+ await self.db_pool.updates._end_background_update(
+ "event_push_backfill_thread_id"
+ )
+
+ return result
+
+ @cached(tree=True, max_entries=5000, iterable=True)
async def get_unread_event_push_actions_by_room_for_user(
self,
room_id: str,
user_id: str,
- ) -> NotifCounts:
+ ) -> RoomNotifCounts:
"""Get the notification count, the highlight count and the unread message count
- for a given user in a given room after the given read receipt.
+ for a given user in a given room after their latest read receipt.
Note that this function assumes the user to be a current member of the room,
since it's either called by the sync handler to handle joined room entries, or by
@@ -177,9 +481,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
user_id: The user to retrieve the counts for.
Returns
- A dict containing the counts mentioned earlier in this docstring,
- respectively under the keys "notify_count", "highlight_count" and
- "unread_count".
+ A RoomNotifCounts object containing the notification count, the
+ highlight count and the unread message count for both the main timeline
+ and threads.
"""
return await self.db_pool.runInteraction(
"get_unread_event_push_actions_by_room",
@@ -193,21 +497,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
txn: LoggingTransaction,
room_id: str,
user_id: str,
- ) -> NotifCounts:
- result = self.get_last_receipt_for_user_txn(
+ ) -> RoomNotifCounts:
+ # Get the stream ordering of the user's latest receipt in the room.
+ result = self.get_last_unthreaded_receipt_for_user_txn(
txn,
user_id,
room_id,
receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
)
- stream_ordering = None
if result:
_, stream_ordering = result
- if stream_ordering is None:
- # Either last_read_event_id is None, or it's an event we don't have (e.g.
- # because it's been purged), in which case retrieve the stream ordering for
+ else:
+ # If the user has no receipts in the room, retrieve the stream ordering for
# the latest membership event from this user in this room (which we assume is
# a join).
event_id = self.db_pool.simple_select_one_onecol_txn(
@@ -224,72 +527,236 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
)
def _get_unread_counts_by_pos_txn(
- self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int
- ) -> NotifCounts:
+ self,
+ txn: LoggingTransaction,
+ room_id: str,
+ user_id: str,
+ unthreaded_receipt_stream_ordering: int,
+ ) -> RoomNotifCounts:
"""Get the number of unread messages for a user/room that have happened
since the given stream ordering.
+
+ Args:
+ txn: The database transaction.
+ room_id: The room ID to get unread counts for.
+ user_id: The user ID to get unread counts for.
+ unthreaded_receipt_stream_ordering: The stream ordering of the user's latest
+ unthreaded receipt in the room. If there are no unthreaded receipts,
+ the stream ordering of the user's join event.
+
+ Returns:
+ A RoomNotifCounts object containing the notification count, the
+ highlight count and the unread message count for both the main timeline
+ and threads.
"""
- counts = NotifCounts()
+ main_counts = NotifCounts()
+ thread_counts: Dict[str, NotifCounts] = {}
+
+ def _get_thread(thread_id: str) -> NotifCounts:
+ if thread_id == MAIN_TIMELINE:
+ return main_counts
+ return thread_counts.setdefault(thread_id, NotifCounts())
+
+ receipt_types_clause, receipts_args = make_in_list_sql_clause(
+ self.database_engine,
+ "receipt_type",
+ (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
+ )
+
+ # First ensure that the existing rows have an updated thread_id field.
+ if not self._event_push_backfill_thread_id_done:
+ txn.execute(
+ """
+ UPDATE event_push_summary
+ SET thread_id = ?
+ WHERE room_id = ? AND user_id = ? AND thread_id is NULL
+ """,
+ (MAIN_TIMELINE, room_id, user_id),
+ )
+ txn.execute(
+ """
+ UPDATE event_push_actions
+ SET thread_id = ?
+ WHERE room_id = ? AND user_id = ? AND thread_id is NULL
+ """,
+ (MAIN_TIMELINE, room_id, user_id),
+ )
# First we pull the counts from the summary table.
#
- # We check that `last_receipt_stream_ordering` matches the stream
- # ordering given. If it doesn't match then a new read receipt has arrived and
- # we haven't yet updated the counts in `event_push_summary` to reflect
- # that; in that case we simply ignore `event_push_summary` counts
- # and do a manual count of all of the rows in the `event_push_actions` table
- # for this user/room.
+ # We check that `last_receipt_stream_ordering` matches the stream ordering of the
+ # latest receipt for the thread (which may be either the unthreaded read receipt
+ # or the threaded read receipt).
+ #
+ # If it doesn't match then a new read receipt has arrived and we haven't yet
+ # updated the counts in `event_push_summary` to reflect that; in that case we
+ # simply ignore `event_push_summary` counts.
+ #
+ # We then do a manual count of all the rows in the `event_push_actions` table
+ # for any user/room/thread which did not have a valid summary found.
#
- # If `last_receipt_stream_ordering` is null then that means it's up to
- # date (as the row was written by an older version of Synapse that
+ # If `last_receipt_stream_ordering` is null then that means it's up-to-date
+ # (as the row was written by an older version of Synapse that
# updated `event_push_summary` synchronously when persisting a new read
# receipt).
txn.execute(
- """
- SELECT stream_ordering, notif_count, COALESCE(unread_count, 0)
+ f"""
+ SELECT notif_count, COALESCE(unread_count, 0), thread_id
FROM event_push_summary
+ LEFT JOIN (
+ SELECT thread_id, MAX(stream_ordering) AS threaded_receipt_stream_ordering
+ FROM receipts_linearized
+ LEFT JOIN events USING (room_id, event_id)
+ WHERE
+ user_id = ?
+ AND room_id = ?
+ AND stream_ordering > ?
+ AND {receipt_types_clause}
+ GROUP BY thread_id
+ ) AS receipts USING (thread_id)
WHERE room_id = ? AND user_id = ?
AND (
- (last_receipt_stream_ordering IS NULL AND stream_ordering > ?)
- OR last_receipt_stream_ordering = ?
- )
+ (last_receipt_stream_ordering IS NULL AND stream_ordering > COALESCE(threaded_receipt_stream_ordering, ?))
+ OR last_receipt_stream_ordering = COALESCE(threaded_receipt_stream_ordering, ?)
+ ) AND (notif_count != 0 OR COALESCE(unread_count, 0) != 0)
""",
- (room_id, user_id, stream_ordering, stream_ordering),
+ (
+ user_id,
+ room_id,
+ unthreaded_receipt_stream_ordering,
+ *receipts_args,
+ room_id,
+ user_id,
+ unthreaded_receipt_stream_ordering,
+ unthreaded_receipt_stream_ordering,
+ ),
)
- row = txn.fetchone()
-
- summary_stream_ordering = 0
- if row:
- summary_stream_ordering = row[0]
- counts.notify_count += row[1]
- counts.unread_count += row[2]
-
- # Next we need to count highlights, which aren't summarized
- sql = """
- SELECT COUNT(*) FROM event_push_actions
+ summarised_threads = set()
+ for notif_count, unread_count, thread_id in txn:
+ summarised_threads.add(thread_id)
+ counts = _get_thread(thread_id)
+ counts.notify_count += notif_count
+ counts.unread_count += unread_count
+
+ # Next we need to count highlights, which aren't summarised
+ sql = f"""
+ SELECT COUNT(*), thread_id FROM event_push_actions
+ LEFT JOIN (
+ SELECT thread_id, MAX(stream_ordering) AS threaded_receipt_stream_ordering
+ FROM receipts_linearized
+ LEFT JOIN events USING (room_id, event_id)
+ WHERE
+ user_id = ?
+ AND room_id = ?
+ AND stream_ordering > ?
+ AND {receipt_types_clause}
+ GROUP BY thread_id
+ ) AS receipts USING (thread_id)
WHERE user_id = ?
AND room_id = ?
- AND stream_ordering > ?
+ AND stream_ordering > COALESCE(threaded_receipt_stream_ordering, ?)
AND highlight = 1
+ GROUP BY thread_id
"""
- txn.execute(sql, (user_id, room_id, stream_ordering))
- row = txn.fetchone()
- if row:
- counts.highlight_count += row[0]
+ txn.execute(
+ sql,
+ (
+ user_id,
+ room_id,
+ unthreaded_receipt_stream_ordering,
+ *receipts_args,
+ user_id,
+ room_id,
+ unthreaded_receipt_stream_ordering,
+ ),
+ )
+ for highlight_count, thread_id in txn:
+ _get_thread(thread_id).highlight_count += highlight_count
- # Finally we need to count push actions that aren't included in the
- # summary returned above, e.g. recent events that haven't been
- # summarized yet, or the summary is empty due to a recent read receipt.
- stream_ordering = max(stream_ordering, summary_stream_ordering)
- notify_count, unread_count = self._get_notif_unread_count_for_user_room(
- txn, room_id, user_id, stream_ordering
+ # For threads which were summarised we need to count actions since the last
+ # rotation.
+ thread_id_clause, thread_id_args = make_in_list_sql_clause(
+ self.database_engine, "thread_id", summarised_threads
)
- counts.notify_count += notify_count
- counts.unread_count += unread_count
+ # The (inclusive) event stream ordering that was previously summarised.
+ rotated_upto_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="event_push_summary_stream_ordering",
+ keyvalues={},
+ retcol="stream_ordering",
+ )
- return counts
+ unread_counts = self._get_notif_unread_count_for_user_room(
+ txn, room_id, user_id, rotated_upto_stream_ordering
+ )
+ for notif_count, unread_count, thread_id in unread_counts:
+ if thread_id not in summarised_threads:
+ continue
+
+ if thread_id == MAIN_TIMELINE:
+ counts.notify_count += notif_count
+ counts.unread_count += unread_count
+ elif thread_id in thread_counts:
+ thread_counts[thread_id].notify_count += notif_count
+ thread_counts[thread_id].unread_count += unread_count
+ else:
+ # Previous thread summaries of 0 are discarded above.
+ #
+ # TODO If empty summaries are deleted this can be removed.
+ thread_counts[thread_id] = NotifCounts(
+ notify_count=notif_count,
+ unread_count=unread_count,
+ highlight_count=0,
+ )
+
+ # Finally we need to count push actions that aren't included in the
+ # summary returned above. This might be due to recent events that haven't
+ # been summarised yet or the summary is out of date due to a recent read
+ # receipt.
+ sql = f"""
+ SELECT
+ COUNT(CASE WHEN notif = 1 THEN 1 END),
+ COUNT(CASE WHEN unread = 1 THEN 1 END),
+ thread_id
+ FROM event_push_actions
+ LEFT JOIN (
+ SELECT thread_id, MAX(stream_ordering) AS threaded_receipt_stream_ordering
+ FROM receipts_linearized
+ LEFT JOIN events USING (room_id, event_id)
+ WHERE
+ user_id = ?
+ AND room_id = ?
+ AND stream_ordering > ?
+ AND {receipt_types_clause}
+ GROUP BY thread_id
+ ) AS receipts USING (thread_id)
+ WHERE user_id = ?
+ AND room_id = ?
+ AND stream_ordering > COALESCE(threaded_receipt_stream_ordering, ?)
+ AND NOT {thread_id_clause}
+ GROUP BY thread_id
+ """
+ txn.execute(
+ sql,
+ (
+ user_id,
+ room_id,
+ unthreaded_receipt_stream_ordering,
+ *receipts_args,
+ user_id,
+ room_id,
+ unthreaded_receipt_stream_ordering,
+ *thread_id_args,
+ ),
+ )
+ for notif_count, unread_count, thread_id in txn:
+ counts = _get_thread(thread_id)
+ counts.notify_count += notif_count
+ counts.unread_count += unread_count
+
+ return RoomNotifCounts(main_counts, thread_counts)
def _get_notif_unread_count_for_user_room(
self,
@@ -298,48 +765,70 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
user_id: str,
stream_ordering: int,
max_stream_ordering: Optional[int] = None,
- ) -> Tuple[int, int]:
+ thread_id: Optional[str] = None,
+ ) -> List[Tuple[int, int, str]]:
"""Returns the notify and unread counts from `event_push_actions` for
the given user/room in the given range.
Does not consult `event_push_summary` table, which may include push
actions that have been deleted from `event_push_actions` table.
+
+ Args:
+ txn: The database transaction.
+ room_id: The room ID to get unread counts for.
+ user_id: The user ID to get unread counts for.
+ stream_ordering: The (exclusive) minimum stream ordering to consider.
+ max_stream_ordering: The (inclusive) maximum stream ordering to consider.
+ If this is not given, then no maximum is applied.
+ thread_id: The thread ID to fetch unread counts for. If this is not provided
+ then the results for *all* threads are returned.
+
+ Note that if this is provided the resulting list will only have 0 or
+ 1 tuples in it.
+
+ Return:
+ A list of tuples of the notif count, unread count and thread ID in the
+ given range, one tuple per thread.
"""
# If there have been no events in the room since the stream ordering,
# there can't be any push actions either.
if not self._events_stream_cache.has_entity_changed(room_id, stream_ordering):
- return 0, 0
+ return []
- clause = ""
+ stream_ordering_clause = ""
args = [user_id, room_id, stream_ordering]
if max_stream_ordering is not None:
- clause = "AND ea.stream_ordering <= ?"
+ stream_ordering_clause = "AND ea.stream_ordering <= ?"
args.append(max_stream_ordering)
# If the max stream ordering is less than the min stream ordering,
# then obviously there are zero push actions in that range.
if max_stream_ordering <= stream_ordering:
- return 0, 0
+ return []
+
+ # Either limit the results to a specific thread or fetch all threads.
+ thread_id_clause = ""
+ if thread_id is not None:
+ thread_id_clause = "AND thread_id = ?"
+ args.append(thread_id)
sql = f"""
SELECT
COUNT(CASE WHEN notif = 1 THEN 1 END),
- COUNT(CASE WHEN unread = 1 THEN 1 END)
- FROM event_push_actions ea
- WHERE user_id = ?
+ COUNT(CASE WHEN unread = 1 THEN 1 END),
+ thread_id
+ FROM event_push_actions ea
+ WHERE user_id = ?
AND room_id = ?
AND ea.stream_ordering > ?
- {clause}
+ {stream_ordering_clause}
+ {thread_id_clause}
+ GROUP BY thread_id
"""
txn.execute(sql, args)
- row = txn.fetchone()
-
- if row:
- return cast(Tuple[int, int], row)
-
- return 0, 0
+ return cast(List[Tuple[int, int, str]], txn.fetchall())
async def get_push_action_users_in_range(
self, min_stream_ordering: int, max_stream_ordering: int
@@ -354,6 +843,49 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
return await self.db_pool.runInteraction("get_push_action_users_in_range", f)
+ def _get_receipts_by_room_txn(
+ self, txn: LoggingTransaction, user_id: str
+ ) -> Dict[str, _RoomReceipt]:
+ """
+ Generate a map of room ID to the latest stream ordering that has been
+ read by the given user.
+
+ Args:
+ txn:
+ user_id: The user to fetch receipts for.
+
+ Returns:
+ A map from room ID to _RoomReceipt, with an entry for every room in
+ which the user has a receipt.
+ """
+ receipt_types_clause, args = make_in_list_sql_clause(
+ self.database_engine,
+ "receipt_type",
+ (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
+ )
+
+ sql = f"""
+ SELECT room_id, thread_id, MAX(stream_ordering)
+ FROM receipts_linearized
+ INNER JOIN events USING (room_id, event_id)
+ WHERE {receipt_types_clause}
+ AND user_id = ?
+ GROUP BY room_id, thread_id
+ """
+
+ args.extend((user_id,))
+ txn.execute(sql, args)
+
+ result: Dict[str, _RoomReceipt] = {}
+ for room_id, thread_id, stream_ordering in txn:
+ room_receipt = result.setdefault(room_id, _RoomReceipt())
+ if thread_id is None:
+ room_receipt.unthreaded_stream_ordering = stream_ordering
+ else:
+ room_receipt.threaded_stream_ordering[thread_id] = stream_ordering
+
+ return result
+
async def get_unread_push_actions_for_user_in_range_for_http(
self,
user_id: str,
@@ -376,81 +908,45 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will be ordered by ascending stream_ordering.
The list will have between 0~limit entries.
"""
- # find rooms that have a read receipt in them and return the next
- # push actions
- def get_after_receipt(
- txn: LoggingTransaction,
- ) -> List[Tuple[str, str, int, str, bool]]:
- # find rooms that have a read receipt in them and return the next
- # push actions
- sql = (
- "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
- " ep.highlight "
- " FROM ("
- " SELECT room_id,"
- " MAX(stream_ordering) as stream_ordering"
- " FROM events"
- " INNER JOIN receipts_linearized USING (room_id, event_id)"
- " WHERE receipt_type = 'm.read' AND user_id = ?"
- " GROUP BY room_id"
- ") AS rl,"
- " event_push_actions AS ep"
- " WHERE"
- " ep.room_id = rl.room_id"
- " AND ep.stream_ordering > rl.stream_ordering"
- " AND ep.user_id = ?"
- " AND ep.stream_ordering > ?"
- " AND ep.stream_ordering <= ?"
- " AND ep.notif = 1"
- " ORDER BY ep.stream_ordering ASC LIMIT ?"
- )
- args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
- txn.execute(sql, args)
- return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall())
- after_read_receipt = await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
+ receipts_by_room = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_http_receipts",
+ self._get_receipts_by_room_txn,
+ user_id=user_id,
)
- # There are rooms with push actions in them but you don't have a read receipt in
- # them e.g. rooms you've been invited to, so get push actions for rooms which do
- # not have read receipts in them too.
- def get_no_receipt(
+ def get_push_actions_txn(
txn: LoggingTransaction,
- ) -> List[Tuple[str, str, int, str, bool]]:
- sql = (
- "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
- " ep.highlight "
- " FROM event_push_actions AS ep"
- " INNER JOIN events AS e USING (room_id, event_id)"
- " WHERE"
- " ep.room_id NOT IN ("
- " SELECT room_id FROM receipts_linearized"
- " WHERE receipt_type = 'm.read' AND user_id = ?"
- " GROUP BY room_id"
- " )"
- " AND ep.user_id = ?"
- " AND ep.stream_ordering > ?"
- " AND ep.stream_ordering <= ?"
- " AND ep.notif = 1"
- " ORDER BY ep.stream_ordering ASC LIMIT ?"
- )
- args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
- txn.execute(sql, args)
- return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall())
+ ) -> List[Tuple[str, str, str, int, str, bool]]:
+ sql = """
+ SELECT ep.event_id, ep.room_id, ep.thread_id, ep.stream_ordering,
+ ep.actions, ep.highlight
+ FROM event_push_actions AS ep
+ WHERE
+ ep.user_id = ?
+ AND ep.stream_ordering > ?
+ AND ep.stream_ordering <= ?
+ AND ep.notif = 1
+ ORDER BY ep.stream_ordering ASC LIMIT ?
+ """
+ txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
+ return cast(List[Tuple[str, str, str, int, str, bool]], txn.fetchall())
- no_read_receipt = await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
+ push_actions = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_http", get_push_actions_txn
)
notifs = [
HttpPushAction(
- event_id=row[0],
- room_id=row[1],
- stream_ordering=row[2],
- actions=_deserialize_action(row[3], row[4]),
+ event_id=event_id,
+ room_id=room_id,
+ stream_ordering=stream_ordering,
+ actions=_deserialize_action(actions, highlight),
+ )
+ for event_id, room_id, thread_id, stream_ordering, actions, highlight in push_actions
+ if receipts_by_room.get(room_id, MISSING_ROOM_RECEIPT).is_unread(
+ thread_id, stream_ordering
)
- for row in after_read_receipt + no_read_receipt
]
# Now sort it so it's ordered correctly, since currently it will
@@ -485,82 +981,48 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will be ordered by descending received_ts.
The list will have between 0~limit entries.
"""
- # find rooms that have a read receipt in them and return the most recent
- # push actions
- def get_after_receipt(
- txn: LoggingTransaction,
- ) -> List[Tuple[str, str, int, str, bool, int]]:
- sql = (
- "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
- " ep.highlight, e.received_ts"
- " FROM ("
- " SELECT room_id,"
- " MAX(stream_ordering) as stream_ordering"
- " FROM events"
- " INNER JOIN receipts_linearized USING (room_id, event_id)"
- " WHERE receipt_type = 'm.read' AND user_id = ?"
- " GROUP BY room_id"
- ") AS rl,"
- " event_push_actions AS ep"
- " INNER JOIN events AS e USING (room_id, event_id)"
- " WHERE"
- " ep.room_id = rl.room_id"
- " AND ep.stream_ordering > rl.stream_ordering"
- " AND ep.user_id = ?"
- " AND ep.stream_ordering > ?"
- " AND ep.stream_ordering <= ?"
- " AND ep.notif = 1"
- " ORDER BY ep.stream_ordering DESC LIMIT ?"
- )
- args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
- txn.execute(sql, args)
- return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall())
- after_read_receipt = await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
+ receipts_by_room = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_email_receipts",
+ self._get_receipts_by_room_txn,
+ user_id=user_id,
)
- # There are rooms with push actions in them but you don't have a read receipt in
- # them e.g. rooms you've been invited to, so get push actions for rooms which do
- # not have read receipts in them too.
- def get_no_receipt(
+ def get_push_actions_txn(
txn: LoggingTransaction,
- ) -> List[Tuple[str, str, int, str, bool, int]]:
- sql = (
- "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
- " ep.highlight, e.received_ts"
- " FROM event_push_actions AS ep"
- " INNER JOIN events AS e USING (room_id, event_id)"
- " WHERE"
- " ep.room_id NOT IN ("
- " SELECT room_id FROM receipts_linearized"
- " WHERE receipt_type = 'm.read' AND user_id = ?"
- " GROUP BY room_id"
- " )"
- " AND ep.user_id = ?"
- " AND ep.stream_ordering > ?"
- " AND ep.stream_ordering <= ?"
- " AND ep.notif = 1"
- " ORDER BY ep.stream_ordering DESC LIMIT ?"
- )
- args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
- txn.execute(sql, args)
- return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall())
+ ) -> List[Tuple[str, str, str, int, str, bool, int]]:
+ sql = """
+ SELECT ep.event_id, ep.room_id, ep.thread_id, ep.stream_ordering,
+ ep.actions, ep.highlight, e.received_ts
+ FROM event_push_actions AS ep
+ INNER JOIN events AS e USING (room_id, event_id)
+ WHERE
+ ep.user_id = ?
+ AND ep.stream_ordering > ?
+ AND ep.stream_ordering <= ?
+ AND ep.notif = 1
+ ORDER BY ep.stream_ordering DESC LIMIT ?
+ """
+ txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
+ return cast(List[Tuple[str, str, str, int, str, bool, int]], txn.fetchall())
- no_read_receipt = await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
+ push_actions = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_email", get_push_actions_txn
)
# Make a list of dicts from the two sets of results.
notifs = [
EmailPushAction(
- event_id=row[0],
- room_id=row[1],
- stream_ordering=row[2],
- actions=_deserialize_action(row[3], row[4]),
- received_ts=row[5],
+ event_id=event_id,
+ room_id=room_id,
+ stream_ordering=stream_ordering,
+ actions=_deserialize_action(actions, highlight),
+ received_ts=received_ts,
+ )
+ for event_id, room_id, thread_id, stream_ordering, actions, highlight, received_ts in push_actions
+ if receipts_by_room.get(room_id, MISSING_ROOM_RECEIPT).is_unread(
+ thread_id, stream_ordering
)
- for row in after_read_receipt + no_read_receipt
]
# Now sort it so it's ordered correctly, since currently it will
@@ -606,8 +1068,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
async def add_push_actions_to_staging(
self,
event_id: str,
- user_id_actions: Dict[str, List[Union[dict, str]]],
+ user_id_actions: Dict[str, Collection[Union[Mapping, str]]],
count_as_unread: bool,
+ thread_id: str,
) -> None:
"""Add the push actions for the event to the push action staging area.
@@ -616,6 +1079,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
user_id_actions: A mapping of user_id to list of push actions, where
an action can either be a string or dict.
count_as_unread: Whether this event should increment unread counts.
+            thread_id: The thread this event relates to, if applicable.
"""
if not user_id_actions:
return
@@ -623,8 +1087,8 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# This is a helper function for generating the necessary tuple that
# can be used to insert into the `event_push_actions_staging` table.
def _gen_entry(
- user_id: str, actions: List[Union[dict, str]]
- ) -> Tuple[str, str, str, int, int, int]:
+ user_id: str, actions: Collection[Union[Mapping, str]]
+ ) -> Tuple[str, str, str, int, int, int, str, int]:
is_highlight = 1 if _action_has_highlight(actions) else 0
notif = 1 if "notify" in actions else 0
return (
@@ -634,28 +1098,27 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
notif, # notif column
is_highlight, # highlight column
int(count_as_unread), # unread column
+ thread_id, # thread_id column
+ self._clock.time_msec(), # inserted_ts column
)
- def _add_push_actions_to_staging_txn(txn: LoggingTransaction) -> None:
- # We don't use simple_insert_many here to avoid the overhead
- # of generating lists of dicts.
-
- sql = """
- INSERT INTO event_push_actions_staging
- (event_id, user_id, actions, notif, highlight, unread)
- VALUES (?, ?, ?, ?, ?, ?)
- """
-
- txn.execute_batch(
- sql,
- (
- _gen_entry(user_id, actions)
- for user_id, actions in user_id_actions.items()
- ),
- )
-
- return await self.db_pool.runInteraction(
- "add_push_actions_to_staging", _add_push_actions_to_staging_txn
+ await self.db_pool.simple_insert_many(
+ "event_push_actions_staging",
+ keys=(
+ "event_id",
+ "user_id",
+ "actions",
+ "notif",
+ "highlight",
+ "unread",
+ "thread_id",
+ "inserted_ts",
+ ),
+ values=[
+ _gen_entry(user_id, actions)
+ for user_id, actions in user_id_actions.items()
+ ],
+ desc="add_push_actions_to_staging",
)
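A hypothetical call site for the reworked staging insert, purely to show the new `thread_id` argument alongside the existing ones (the store object, IDs and actions below are placeholders, not values taken from this patch):

```python
# Illustrative only; assumes `store` is an instance exposing the method above.
async def stage_example(store) -> None:
    await store.add_push_actions_to_staging(
        event_id="$some_event:example.org",
        user_id_actions={
            # An action list may mix plain strings and tweak dicts.
            "@alice:example.org": [
                "notify",
                {"set_tweak": "highlight", "value": False},
            ],
        },
        count_as_unread=True,
        # Every staged action now records the thread it belongs to.
        thread_id="$thread_root:example.org",
    )
```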
async def remove_push_actions_from_staging(self, event_id: str) -> None:
@@ -769,12 +1232,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# [10, <none>, 20], we should treat this as being equivalent to
# [10, 10, 20].
#
- sql = (
- "SELECT received_ts FROM events"
- " WHERE stream_ordering <= ?"
- " ORDER BY stream_ordering DESC"
- " LIMIT 1"
- )
+ sql = """
+ SELECT received_ts FROM events
+ WHERE stream_ordering <= ?
+ ORDER BY stream_ordering DESC
+ LIMIT 1
+ """
while range_end - range_start > 0:
middle = (range_end + range_start) // 2
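The loop beginning here is a plain binary search over stream orderings, using `received_ts` as a (mostly) monotonic proxy. A self-contained restatement of the idea, with a hypothetical `received_ts_at_or_before` callback standing in for the SQL lookup above:

```python
from typing import Callable, Optional


def first_stream_ordering_after_ts(
    ts: int,
    range_start: int,
    range_end: int,
    received_ts_at_or_before: Callable[[int], Optional[int]],
) -> int:
    """Smallest stream ordering whose most recent preceding event was received
    at or after `ts` (an illustrative restatement of the loop in the hunk above)."""
    while range_end - range_start > 0:
        middle = (range_end + range_start) // 2
        middle_ts = received_ts_at_or_before(middle)
        if middle_ts is None or middle_ts < ts:
            # Everything up to `middle` was received before `ts` (or there are no
            # events there at all), so the answer must lie above `middle`.
            range_start = middle + 1
        else:
            # `middle` is already at or past `ts`, so it becomes the new upper bound.
            range_end = middle
    return range_end
```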
@@ -802,14 +1265,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
self, stream_ordering: int
) -> Optional[int]:
def f(txn: LoggingTransaction) -> Optional[Tuple[int]]:
- sql = (
- "SELECT e.received_ts"
- " FROM event_push_actions AS ep"
- " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id"
- " WHERE ep.stream_ordering > ? AND notif = 1"
- " ORDER BY ep.stream_ordering ASC"
- " LIMIT 1"
- )
+ sql = """
+ SELECT e.received_ts
+ FROM event_push_actions AS ep
+ JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id
+ WHERE ep.stream_ordering > ? AND notif = 1
+ ORDER BY ep.stream_ordering ASC
+ LIMIT 1
+ """
txn.execute(sql, (stream_ordering,))
return cast(Optional[Tuple[int]], txn.fetchone())
@@ -858,10 +1321,13 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
Any push actions which predate the user's most recent read receipt are
now redundant, so we can remove them from `event_push_actions` and
update `event_push_summary`.
+
+ Returns true if all new receipts have been processed.
"""
limit = 100
+        # The (inclusive) receipt stream ID that was previously processed.
min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_push_summary_last_receipt_stream_id",
@@ -871,8 +1337,16 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
max_receipts_stream_id = self._receipts_id_gen.get_current_token()
+ # The (inclusive) event stream ordering that was previously summarised.
+ old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="event_push_summary_stream_ordering",
+ keyvalues={},
+ retcol="stream_ordering",
+ )
+
sql = """
- SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
+ SELECT r.stream_id, r.room_id, r.user_id, r.thread_id, e.stream_ordering
FROM receipts_linearized AS r
INNER JOIN events AS e USING (event_id)
WHERE ? < r.stream_id AND r.stream_id <= ? AND user_id LIKE ?
@@ -893,73 +1367,133 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
limit,
),
)
- rows = txn.fetchall()
-
- old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
- txn,
- table="event_push_summary_stream_ordering",
- keyvalues={},
- retcol="stream_ordering",
- )
+ rows = cast(List[Tuple[int, str, str, Optional[str], int]], txn.fetchall())
# For each new read receipt we delete push actions from before it and
# recalculate the summary.
- for _, room_id, user_id, stream_ordering in rows:
+ #
+        # Care must be taken to handle threaded and unthreaded receipts differently.
+ for _, room_id, user_id, thread_id, stream_ordering in rows:
# Only handle our own read receipts.
if not self.hs.is_mine_id(user_id):
continue
+ thread_clause = ""
+ thread_args: Tuple = ()
+ if thread_id is not None:
+ thread_clause = "AND thread_id = ?"
+ thread_args = (thread_id,)
+
+            # Delete the (non-highlight) push actions that predate this receipt,
+            # restricted to the receipt's thread for threaded receipts.
txn.execute(
- """
+ f"""
DELETE FROM event_push_actions
WHERE room_id = ?
AND user_id = ?
AND stream_ordering <= ?
AND highlight = 0
+ {thread_clause}
""",
- (room_id, user_id, stream_ordering),
+ (room_id, user_id, stream_ordering, *thread_args),
)
- notif_count, unread_count = self._get_notif_unread_count_for_user_room(
- txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
+ # First ensure that the existing rows have an updated thread_id field.
+ if not self._event_push_backfill_thread_id_done:
+ txn.execute(
+ """
+ UPDATE event_push_summary
+ SET thread_id = ?
+ WHERE room_id = ? AND user_id = ? AND thread_id is NULL
+ """,
+ (MAIN_TIMELINE, room_id, user_id),
+ )
+ txn.execute(
+ """
+ UPDATE event_push_actions
+ SET thread_id = ?
+ WHERE room_id = ? AND user_id = ? AND thread_id is NULL
+ """,
+ (MAIN_TIMELINE, room_id, user_id),
+ )
+
+ # Fetch the notification counts between the stream ordering of the
+ # latest receipt and what was previously summarised.
+ unread_counts = self._get_notif_unread_count_for_user_room(
+ txn,
+ room_id,
+ user_id,
+ stream_ordering,
+ old_rotate_stream_ordering,
+ thread_id,
)
- self.db_pool.simple_upsert_txn(
+ # For an unthreaded receipt, mark the summary for all threads in the room
+ # as cleared.
+ if thread_id is None:
+ self.db_pool.simple_update_txn(
+ txn,
+ table="event_push_summary",
+ keyvalues={"user_id": user_id, "room_id": room_id},
+ updatevalues={
+ "notif_count": 0,
+ "unread_count": 0,
+ "stream_ordering": old_rotate_stream_ordering,
+ "last_receipt_stream_ordering": stream_ordering,
+ },
+ )
+
+ # For a threaded receipt, we *always* want to update that receipt,
+            # even if there are no new notifications in that thread. This ensures
+ # the stream_ordering & last_receipt_stream_ordering are updated.
+ elif not unread_counts:
+ unread_counts = [(0, 0, thread_id)]
+
+ # Then any updated threads get their notification count and unread
+ # count updated.
+ self.db_pool.simple_update_many_txn(
txn,
table="event_push_summary",
- keyvalues={"room_id": room_id, "user_id": user_id},
- values={
- "notif_count": notif_count,
- "unread_count": unread_count,
- "stream_ordering": old_rotate_stream_ordering,
- "last_receipt_stream_ordering": stream_ordering,
- },
+ key_names=("room_id", "user_id", "thread_id"),
+ key_values=[(room_id, user_id, row[2]) for row in unread_counts],
+ value_names=(
+ "notif_count",
+ "unread_count",
+ "stream_ordering",
+ "last_receipt_stream_ordering",
+ ),
+ value_values=[
+ (row[0], row[1], old_rotate_stream_ordering, stream_ordering)
+ for row in unread_counts
+ ],
)
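A loose, table-free restatement of the branch above, only to make the two receipt flavours easy to compare (dict updates stand in for the `event_push_summary` writes; counts are `(notif, unread)` pairs):

```python
from typing import Dict, Optional, Tuple


def apply_receipt_to_summaries(
    summaries: Dict[str, Tuple[int, int]],     # thread_id -> (notif, unread)
    receipt_thread_id: Optional[str],
    recalculated: Dict[str, Tuple[int, int]],  # counts remaining after the receipt
) -> None:
    if receipt_thread_id is None:
        # An unthreaded receipt clears the summary for every thread in the room...
        for thread_id in summaries:
            summaries[thread_id] = (0, 0)
    elif receipt_thread_id not in recalculated:
        # ...while a threaded receipt always refreshes its own thread's row,
        # even when no new notifications remain in that thread.
        recalculated = {receipt_thread_id: (0, 0)}
    # Threads that still have notifications then get their recalculated counts.
    summaries.update(recalculated)
```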
# We always update `event_push_summary_last_receipt_stream_id` to
# ensure that we don't rescan the same receipts for remote users.
- upper_limit = max_receipts_stream_id
+ receipts_last_processed_stream_id = max_receipts_stream_id
if len(rows) >= limit:
# If we pulled out a limited number of rows we only update the
# position to the last receipt we processed, so we continue
# processing the rest next iteration.
- upper_limit = rows[-1][0]
+ receipts_last_processed_stream_id = rows[-1][0]
self.db_pool.simple_update_txn(
txn,
table="event_push_summary_last_receipt_stream_id",
keyvalues={},
- updatevalues={"stream_id": upper_limit},
+ updatevalues={"stream_id": receipts_last_processed_stream_id},
)
return len(rows) < limit
def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool:
- """Archives older notifications into event_push_summary. Returns whether
- the archiving process has caught up or not.
+ """Archives older notifications (from event_push_actions) into event_push_summary.
+
+ Returns whether the archiving process has caught up or not.
"""
+ # The (inclusive) event stream ordering that was previously summarised.
old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_push_summary_stream_ordering",
@@ -974,7 +1508,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
SELECT stream_ordering FROM event_push_actions
WHERE stream_ordering > ?
ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
- """,
+ """,
(old_rotate_stream_ordering, self._rotate_count),
)
stream_row = txn.fetchone()
@@ -993,39 +1527,62 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering)
- self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering)
+ self._rotate_notifs_before_txn(
+ txn, old_rotate_stream_ordering, rotate_to_stream_ordering
+ )
return caught_up
def _rotate_notifs_before_txn(
- self, txn: LoggingTransaction, rotate_to_stream_ordering: int
+ self,
+ txn: LoggingTransaction,
+ old_rotate_stream_ordering: int,
+ rotate_to_stream_ordering: int,
) -> None:
- old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
- txn,
- table="event_push_summary_stream_ordering",
- keyvalues={},
- retcol="stream_ordering",
- )
+ """Archives older notifications (from event_push_actions) into event_push_summary.
+
+ Any event_push_actions between old_rotate_stream_ordering (exclusive) and
+ rotate_to_stream_ordering (inclusive) will be added to the event_push_summary
+ table.
+
+ Args:
+ txn: The database transaction.
+ old_rotate_stream_ordering: The previous maximum event stream ordering.
+ rotate_to_stream_ordering: The new maximum event stream ordering to summarise.
+ """
+
+ # Ensure that any new actions have an updated thread_id.
+ if not self._event_push_backfill_thread_id_done:
+ txn.execute(
+ """
+ UPDATE event_push_actions
+ SET thread_id = ?
+ WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
+ """,
+ (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering),
+ )
+
+ # XXX Do we need to update summaries here too?
# Calculate the new counts that should be upserted into event_push_summary
sql = """
- SELECT user_id, room_id,
+ SELECT user_id, room_id, thread_id,
coalesce(old.%s, 0) + upd.cnt,
upd.stream_ordering
FROM (
- SELECT user_id, room_id, count(*) as cnt,
+ SELECT user_id, room_id, thread_id, count(*) as cnt,
max(ea.stream_ordering) as stream_ordering
FROM event_push_actions AS ea
- LEFT JOIN event_push_summary AS old USING (user_id, room_id)
+ LEFT JOIN event_push_summary AS old USING (user_id, room_id, thread_id)
WHERE ? < ea.stream_ordering AND ea.stream_ordering <= ?
AND (
old.last_receipt_stream_ordering IS NULL
OR old.last_receipt_stream_ordering < ea.stream_ordering
)
AND %s = 1
- GROUP BY user_id, room_id
+ GROUP BY user_id, room_id, thread_id
) AS upd
- LEFT JOIN event_push_summary AS old USING (user_id, room_id)
+ LEFT JOIN event_push_summary AS old USING (user_id, room_id, thread_id)
"""
# First get the count of unread messages.
@@ -1039,11 +1596,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# object because we might not have the same amount of rows in each of them. To do
# this, we use a dict indexed on the user ID and room ID to make it easier to
# populate.
- summaries: Dict[Tuple[str, str], _EventPushSummary] = {}
+ summaries: Dict[Tuple[str, str, str], _EventPushSummary] = {}
for row in txn:
- summaries[(row[0], row[1])] = _EventPushSummary(
- unread_count=row[2],
- stream_ordering=row[3],
+ summaries[(row[0], row[1], row[2])] = _EventPushSummary(
+ unread_count=row[3],
+ stream_ordering=row[4],
notif_count=0,
)
@@ -1054,26 +1611,43 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
)
for row in txn:
- if (row[0], row[1]) in summaries:
- summaries[(row[0], row[1])].notif_count = row[2]
+ if (row[0], row[1], row[2]) in summaries:
+ summaries[(row[0], row[1], row[2])].notif_count = row[3]
else:
# Because the rules on notifying are different than the rules on marking
# a message unread, we might end up with messages that notify but aren't
# marked unread, so we might not have a summary for this (user, room)
# tuple to complete.
- summaries[(row[0], row[1])] = _EventPushSummary(
+ summaries[(row[0], row[1], row[2])] = _EventPushSummary(
unread_count=0,
- stream_ordering=row[3],
- notif_count=row[2],
+ stream_ordering=row[4],
+ notif_count=row[3],
)
logger.info("Rotating notifications, handling %d rows", len(summaries))
+ # Ensure that any updated threads have the proper thread_id.
+ if not self._event_push_backfill_thread_id_done:
+ txn.execute_batch(
+ """
+ UPDATE event_push_summary
+ SET thread_id = ?
+ WHERE room_id = ? AND user_id = ? AND thread_id is NULL
+ """,
+ [
+ (MAIN_TIMELINE, room_id, user_id)
+ for user_id, room_id, _ in summaries
+ ],
+ )
+
self.db_pool.simple_upsert_many_txn(
txn,
table="event_push_summary",
- key_names=("user_id", "room_id"),
- key_values=[(user_id, room_id) for user_id, room_id in summaries],
+ key_names=("user_id", "room_id", "thread_id"),
+ key_values=[
+ (user_id, room_id, thread_id)
+ for user_id, room_id, thread_id in summaries
+ ],
value_names=("notif_count", "unread_count", "stream_ordering"),
value_values=[
(
@@ -1090,12 +1664,13 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
(rotate_to_stream_ordering,),
)
- async def _remove_old_push_actions_that_have_rotated(
- self,
- ) -> None:
- """Clear out old push actions that have been summarized."""
+ async def _remove_old_push_actions_that_have_rotated(self) -> None:
+ """
+        Clear out old push actions that have been summarised (and are more than
+        a day old).
+ """
- # We want to clear out anything that older than a day that *has* already
+ # We want to clear out anything that is older than a day that *has* already
# been rotated.
rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol(
table="event_push_summary_stream_ordering",
@@ -1119,7 +1694,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
SELECT stream_ordering FROM event_push_actions
WHERE stream_ordering <= ? AND highlight = 0
ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
- """,
+ """,
(
max_stream_ordering_to_delete,
batch_size,
@@ -1154,6 +1729,53 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
if done:
break
+ @wrap_as_background_process("_clear_old_push_actions_staging")
+ async def _clear_old_push_actions_staging(self) -> None:
+ """Clear out any old event push actions from the staging table for
+ events that we failed to persist.
+ """
+
+ # We delete anything more than an hour old, on the assumption that we'll
+ # never take more than an hour to persist an event.
+ delete_before_ts = self._clock.time_msec() - 60 * 60 * 1000
+
+ if self._started_ts > delete_before_ts:
+            # We need to have been running for at least an hour before deleting,
+ # so that we know it's safe to delete rows with NULL `inserted_ts`.
+ return
+
+        # We don't have an index on `inserted_ts`; instead we assume that the
+ # number of "live" rows in `event_push_actions_staging` is small enough
+ # that an infrequent periodic scan won't cause a problem.
+ #
+        # Note: we also delete any rows with NULL `inserted_ts`; this is safe
+ # as we added a default value to new rows and so they must be at least
+ # an hour old.
+ limit = 1000
+ sql = """
+ DELETE FROM event_push_actions_staging WHERE event_id IN (
+ SELECT event_id FROM event_push_actions_staging WHERE
+ inserted_ts < ? OR inserted_ts IS NULL
+ LIMIT ?
+ )
+ """
+
+ def _clear_old_push_actions_staging_txn(txn: LoggingTransaction) -> bool:
+ txn.execute(sql, (delete_before_ts, limit))
+ return txn.rowcount >= limit
+
+ while True:
+ # Returns true if we have more stuff to delete from the table.
+ deleted = await self.db_pool.runInteraction(
+ "_clear_old_push_actions_staging", _clear_old_push_actions_staging_txn
+ )
+
+ if not deleted:
+ return
+
+ # We sleep to ensure that we don't overwhelm the DB.
+ await self._clock.sleep(1.0)
+
class EventPushActionsStore(EventPushActionsWorkerStore):
EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
@@ -1188,7 +1810,6 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
table="event_push_actions",
columns=["highlight", "stream_ordering"],
where_clause="highlight=0",
- psql_only=True,
)
async def get_push_actions_for_user(
@@ -1215,16 +1836,18 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
# NB. This assumes event_ids are globally unique since
# it makes the query easier to index
- sql = (
- "SELECT epa.event_id, epa.room_id,"
- " epa.stream_ordering, epa.topological_ordering,"
- " epa.actions, epa.highlight, epa.profile_tag, e.received_ts"
- " FROM event_push_actions epa, events e"
- " WHERE epa.event_id = e.event_id"
- " AND epa.user_id = ? %s"
- " AND epa.notif = 1"
- " ORDER BY epa.stream_ordering DESC"
- " LIMIT ?" % (before_clause,)
+ sql = """
+ SELECT epa.event_id, epa.room_id,
+ epa.stream_ordering, epa.topological_ordering,
+ epa.actions, epa.highlight, epa.profile_tag, e.received_ts
+ FROM event_push_actions epa, events e
+ WHERE epa.event_id = e.event_id
+ AND epa.user_id = ? %s
+ AND epa.notif = 1
+ ORDER BY epa.stream_ordering DESC
+ LIMIT ?
+ """ % (
+ before_clause,
)
txn.execute(sql, args)
return cast(
@@ -1247,7 +1870,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
]
-def _action_has_highlight(actions: List[Union[dict, str]]) -> bool:
+def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool:
for action in actions:
if not isinstance(action, dict):
continue
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1f600f1190..0f097a2927 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -40,6 +40,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
+from synapse.logging.opentracing import trace
from synapse.storage._base import db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
@@ -145,6 +146,7 @@ class PersistEventsStore:
self._backfill_id_gen: AbstractStreamIdGenerator = self.store._backfill_id_gen
self._stream_id_gen: AbstractStreamIdGenerator = self.store._stream_id_gen
+ @trace
async def _persist_events_and_state_updates(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
@@ -353,9 +355,9 @@ class PersistEventsStore:
txn: LoggingTransaction,
*,
events_and_contexts: List[Tuple[EventBase, EventContext]],
- inhibit_local_membership_updates: bool = False,
- state_delta_for_room: Optional[Dict[str, DeltaState]] = None,
- new_forward_extremities: Optional[Dict[str, Set[str]]] = None,
+ inhibit_local_membership_updates: bool,
+ state_delta_for_room: Dict[str, DeltaState],
+ new_forward_extremities: Dict[str, Set[str]],
) -> None:
"""Insert some number of room events into the necessary database tables.
@@ -382,9 +384,6 @@ class PersistEventsStore:
PartialStateConflictError: if attempting to persist a partial state event in
a room that has been un-partial stated.
"""
- state_delta_for_room = state_delta_for_room or {}
- new_forward_extremities = new_forward_extremities or {}
-
all_events_and_contexts = events_and_contexts
min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
@@ -408,6 +407,31 @@ class PersistEventsStore:
assert min_stream_order
assert max_stream_order
+ # Once the txn completes, invalidate all of the relevant caches. Note that we do this
+ # up here because it captures all the events_and_contexts before any are removed.
+ for event, _ in events_and_contexts:
+ self.store.invalidate_get_event_cache_after_txn(txn, event.event_id)
+ if event.redacts:
+ self.store.invalidate_get_event_cache_after_txn(txn, event.redacts)
+
+ relates_to = None
+ relation = relation_from_event(event)
+ if relation:
+ relates_to = relation.parent_id
+
+ assert event.internal_metadata.stream_ordering is not None
+ txn.call_after(
+ self.store._invalidate_caches_for_event,
+ event.internal_metadata.stream_ordering,
+ event.event_id,
+ event.room_id,
+ event.type,
+ getattr(event, "state_key", None),
+ event.redacts,
+ relates_to,
+ backfilled=False,
+ )
+
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremities,
@@ -457,6 +481,7 @@ class PersistEventsStore:
# We call this last as it assumes we've inserted the events into
# room_memberships, where applicable.
+ # NB: This function invalidates all state related caches
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
def _persist_event_auth_chain_txn(
@@ -1170,17 +1195,16 @@ class PersistEventsStore:
)
# Invalidate the various caches
-
- for member in members_changed:
- txn.call_after(
- self.store.get_rooms_for_user_with_stream_ordering.invalidate,
- (member,),
- )
-
self.store._invalidate_state_caches_and_stream(
txn, room_id, members_changed
)
+ # Check if any of the remote membership changes requires us to
+ # unsubscribe from their device lists.
+ self.store.handle_potentially_left_users_txn(
+ txn, {m for m in members_changed if not self.hs.is_mine_id(m)}
+ )
+
def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None:
"""Update the room version in the database based off current state
events.
@@ -1220,9 +1244,6 @@ class PersistEventsStore:
self.db_pool.simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
- txn.call_after(
- self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
- )
self.db_pool.simple_insert_many_txn(
txn,
@@ -1258,9 +1279,10 @@ class PersistEventsStore:
Pick the earliest non-outlier if there is one, else the earliest one.
Args:
- events_and_contexts (list[(EventBase, EventContext)]):
+ events_and_contexts:
+
Returns:
- list[(EventBase, EventContext)]: filtered list
+ filtered list
"""
new_events_and_contexts: OrderedDict[
str, Tuple[EventBase, EventContext]
@@ -1286,14 +1308,11 @@ class PersistEventsStore:
"""Update min_depth for each room
Args:
- txn (twisted.enterprise.adbapi.Connection): db connection
- events_and_contexts (list[(EventBase, EventContext)]): events
- we are persisting
+ txn: db connection
+ events_and_contexts: events we are persisting
"""
depth_updates: Dict[str, int] = {}
for event, context in events_and_contexts:
- # Remove the any existing cache entries for the event_ids
- self.store.invalidate_get_event_cache_after_txn(txn, event.event_id)
# Then update the `stream_ordering` position to mark the latest
# event as the front of the room. This should not be done for
# backfilled events because backfilled events have negative
@@ -1490,7 +1509,7 @@ class PersistEventsStore:
event.sender,
"url" in event.content and isinstance(event.content["url"], str),
event.get_state_key(),
- context.rejected or None,
+ context.rejected,
)
for event, context in events_and_contexts
),
@@ -1561,13 +1580,11 @@ class PersistEventsStore:
"""Update all the miscellaneous tables for new events
Args:
- txn (twisted.enterprise.adbapi.Connection): db connection
- events_and_contexts (list[(EventBase, EventContext)]): events
- we are persisting
- all_events_and_contexts (list[(EventBase, EventContext)]): all
- events that we were going to persist. This includes events
- we've already persisted, etc, that wouldn't appear in
- events_and_context.
+ txn: db connection
+ events_and_contexts: events we are persisting
+ all_events_and_contexts: all events that we were going to persist.
+                This includes events we've already persisted, etc., that wouldn't
+                appear in events_and_contexts.
inhibit_local_membership_updates: Stop the local_current_membership
from being updated by these events. This should be set to True
for backfilled events because backfilled events in the past do
@@ -1594,7 +1611,7 @@ class PersistEventsStore:
)
# Remove from relations table.
- self._handle_redact_relations(txn, event.redacts)
+ self._handle_redact_relations(txn, event.room_id, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
@@ -1695,16 +1712,7 @@ class PersistEventsStore:
txn.async_call_after(prefill)
def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
- """Invalidate the caches for the redacted event.
-
- Note that these caches are also cleared as part of event replication in
- _invalidate_caches_for_event.
- """
assert event.redacts is not None
- self.store.invalidate_get_event_cache_after_txn(txn, event.redacts)
- txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
- txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))
-
self.db_pool.simple_upsert_txn(
txn,
table="redactions",
@@ -1805,34 +1813,6 @@ class PersistEventsStore:
for event in events:
assert event.internal_metadata.stream_ordering is not None
- txn.call_after(
- self.store._membership_stream_cache.entity_has_changed,
- event.state_key,
- event.internal_metadata.stream_ordering,
- )
- txn.call_after(
- self.store.get_invited_rooms_for_local_user.invalidate,
- (event.state_key,),
- )
- txn.call_after(
- self.store.get_local_users_in_room.invalidate,
- (event.room_id,),
- )
- txn.call_after(
- self.store.get_number_joined_users_in_room.invalidate,
- (event.room_id,),
- )
- txn.call_after(
- self.store.get_user_in_room_with_profile.invalidate,
- (event.room_id, event.state_key),
- )
-
- # The `_get_membership_from_event_id` is immutable, except for the
- # case where we look up an event *before* persisting it.
- txn.call_after(
- self.store._get_membership_from_event_id.invalidate,
- (event.event_id,),
- )
# We update the local_current_membership table only if the event is
# "current", i.e., its something that has just happened.
@@ -1881,33 +1861,32 @@ class PersistEventsStore:
},
)
- txn.call_after(
- self.store.get_relations_for_event.invalidate, (relation.parent_id,)
- )
- txn.call_after(
- self.store.get_aggregation_groups_for_event.invalidate,
- (relation.parent_id,),
- )
- txn.call_after(
- self.store.get_mutual_event_relations_for_rel_type.invalidate,
- (relation.parent_id,),
- )
-
- if relation.rel_type == RelationTypes.REPLACE:
- txn.call_after(
- self.store.get_applicable_edit.invalidate, (relation.parent_id,)
- )
-
if relation.rel_type == RelationTypes.THREAD:
- txn.call_after(
- self.store.get_thread_summary.invalidate, (relation.parent_id,)
- )
- # It should be safe to only invalidate the cache if the user has not
- # previously participated in the thread, but that's difficult (and
- # potentially error-prone) so it is always invalidated.
- txn.call_after(
- self.store.get_thread_participated.invalidate,
- (relation.parent_id, event.sender),
+            # Upsert into the threads table, but only overwrite the existing row
+            # if the new event has a later topological ordering, or the same
+            # topological ordering and a later stream ordering.
+ sql = """
+ INSERT INTO threads (room_id, thread_id, latest_event_id, topological_ordering, stream_ordering)
+ VALUES (?, ?, ?, ?, ?)
+ ON CONFLICT (room_id, thread_id)
+ DO UPDATE SET
+ latest_event_id = excluded.latest_event_id,
+ topological_ordering = excluded.topological_ordering,
+ stream_ordering = excluded.stream_ordering
+ WHERE
+ threads.topological_ordering <= excluded.topological_ordering AND
+ threads.stream_ordering < excluded.stream_ordering
+ """
+
+ txn.execute(
+ sql,
+ (
+ event.room_id,
+ relation.parent_id,
+ event.event_id,
+ event.depth,
+ event.internal_metadata.stream_ordering,
+ ),
)
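The `ON CONFLICT ... WHERE` guard above encodes a "newer wins" rule for the `threads` row. Expressed in plain Python (an illustrative mirror of the SQL, not code from the patch):

```python
from typing import NamedTuple


class ThreadLatest(NamedTuple):
    latest_event_id: str
    topological_ordering: int
    stream_ordering: int


def should_replace(current: ThreadLatest, candidate: ThreadLatest) -> bool:
    # Mirrors the WHERE clause: the existing row is overwritten only when the
    # candidate's topological ordering is at least as late *and* its stream
    # ordering is strictly later.
    return (
        current.topological_ordering <= candidate.topological_ordering
        and current.stream_ordering < candidate.stream_ordering
    )
```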
def _handle_insertion_event(
@@ -2033,35 +2012,52 @@ class PersistEventsStore:
txn.execute(sql, (batch_id,))
def _handle_redact_relations(
- self, txn: LoggingTransaction, redacted_event_id: str
+ self, txn: LoggingTransaction, room_id: str, redacted_event_id: str
) -> None:
"""Handles receiving a redaction and checking whether the redacted event
has any relations which must be removed from the database.
Args:
txn
+ room_id: The room ID of the event that was redacted.
redacted_event_id: The event that was redacted.
"""
- # Fetch the current relation of the event being redacted.
- redacted_relates_to = self.db_pool.simple_select_one_onecol_txn(
+ # Fetch the relation of the event being redacted.
+ row = self.db_pool.simple_select_one_txn(
txn,
table="event_relations",
keyvalues={"event_id": redacted_event_id},
- retcol="relates_to_id",
+ retcols=("relates_to_id", "relation_type"),
allow_none=True,
)
+ # Nothing to do if no relation is found.
+ if row is None:
+ return
+
+ redacted_relates_to = row["relates_to_id"]
+ rel_type = row["relation_type"]
+ self.db_pool.simple_delete_txn(
+ txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
+ )
+
# Any relation information for the related event must be cleared.
- if redacted_relates_to is not None:
+ self.store._invalidate_cache_and_stream(
+ txn, self.store.get_relations_for_event, (redacted_relates_to,)
+ )
+ if rel_type == RelationTypes.ANNOTATION:
self.store._invalidate_cache_and_stream(
- txn, self.store.get_relations_for_event, (redacted_relates_to,)
+ txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
)
+ if rel_type == RelationTypes.REFERENCE:
self.store._invalidate_cache_and_stream(
- txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
+ txn, self.store.get_references_for_event, (redacted_relates_to,)
)
+ if rel_type == RelationTypes.REPLACE:
self.store._invalidate_cache_and_stream(
txn, self.store.get_applicable_edit, (redacted_relates_to,)
)
+ if rel_type == RelationTypes.THREAD:
self.store._invalidate_cache_and_stream(
txn, self.store.get_thread_summary, (redacted_relates_to,)
)
@@ -2069,14 +2065,41 @@ class PersistEventsStore:
txn, self.store.get_thread_participated, (redacted_relates_to,)
)
self.store._invalidate_cache_and_stream(
- txn,
- self.store.get_mutual_event_relations_for_rel_type,
- (redacted_relates_to,),
+ txn, self.store.get_threads, (room_id,)
)
- self.db_pool.simple_delete_txn(
- txn, table="event_relations", keyvalues={"event_id": redacted_event_id}
- )
+ # Find the new latest event in the thread.
+ sql = """
+ SELECT event_id, topological_ordering, stream_ordering
+ FROM event_relations
+ INNER JOIN events USING (event_id)
+ WHERE relates_to_id = ? AND relation_type = ?
+ ORDER BY topological_ordering DESC, stream_ordering DESC
+ LIMIT 1
+ """
+ txn.execute(sql, (redacted_relates_to, RelationTypes.THREAD))
+
+        # If a latest event is found, update the threads table; this might
+        # be the same as the current latest event (if an earlier event in the
+        # thread was redacted).
+ latest_event_row = txn.fetchone()
+ if latest_event_row:
+ self.db_pool.simple_upsert_txn(
+ txn,
+ table="threads",
+ keyvalues={"room_id": room_id, "thread_id": redacted_relates_to},
+ values={
+ "latest_event_id": latest_event_row[0],
+ "topological_ordering": latest_event_row[1],
+ "stream_ordering": latest_event_row[2],
+ },
+ )
+
+ # Otherwise, delete the thread: it no longer exists.
+ else:
+ self.db_pool.simple_delete_one_txn(
+ txn, table="threads", keyvalues={"thread_id": redacted_relates_to}
+ )
def _store_room_topic_txn(self, txn: LoggingTransaction, event: EventBase) -> None:
if isinstance(event.content.get("topic"), str):
@@ -2178,26 +2201,26 @@ class PersistEventsStore:
appear in events_and_context.
"""
- # Only non outlier events will have push actions associated with them,
+ # Only notifiable events will have push actions associated with them,
# so let's filter them out. (This makes joining large rooms faster, as
# these queries took seconds to process all the state events).
- non_outlier_events = [
+ notifiable_events = [
event
for event, _ in events_and_contexts
- if not event.internal_metadata.is_outlier()
+ if event.internal_metadata.is_notifiable()
]
sql = """
INSERT INTO event_push_actions (
room_id, event_id, user_id, actions, stream_ordering,
- topological_ordering, notif, highlight, unread
+ topological_ordering, notif, highlight, unread, thread_id
)
- SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread
+ SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight, unread, thread_id
FROM event_push_actions_staging
WHERE event_id = ?
"""
- if non_outlier_events:
+ if notifiable_events:
txn.execute_batch(
sql,
(
@@ -2207,32 +2230,10 @@ class PersistEventsStore:
event.depth,
event.event_id,
)
- for event in non_outlier_events
+ for event in notifiable_events
),
)
- room_to_event_ids: Dict[str, List[str]] = {}
- for e in non_outlier_events:
- room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
-
- for room_id, event_ids in room_to_event_ids.items():
- rows = self.db_pool.simple_select_many_txn(
- txn,
- table="event_push_actions_staging",
- column="event_id",
- iterable=event_ids,
- keyvalues={},
- retcols=("user_id",),
- )
-
- user_ids = {row["user_id"] for row in rows}
-
- for user_id in user_ids:
- txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
- (room_id, user_id),
- )
-
# Now we delete the staging area for *all* events that were being
# persisted.
txn.execute_batch(
@@ -2240,18 +2241,13 @@ class PersistEventsStore:
(
(event.event_id,)
for event, _ in all_events_and_contexts
- if not event.internal_metadata.is_outlier()
+ if event.internal_metadata.is_notifiable()
),
)
def _remove_push_actions_for_event_id_txn(
self, txn: LoggingTransaction, room_id: str, event_id: str
) -> None:
- # Sad that we have to blow away the cache for the whole room here
- txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
- (room_id,),
- )
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id),
@@ -2433,17 +2429,31 @@ class PersistEventsStore:
"DELETE FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
)
+ backward_extremity_tuples_to_remove = [
+ (ev.event_id, ev.room_id)
+ for ev in events
+ if not ev.internal_metadata.is_outlier()
+ # If we encountered an event with no prev_events, then we might
+ # as well remove it now because it won't ever have anything else
+ # to backfill from.
+ or len(ev.prev_event_ids()) == 0
+ ]
txn.execute_batch(
query,
- [
- (ev.event_id, ev.room_id)
- for ev in events
- if not ev.internal_metadata.is_outlier()
- # If we encountered an event with no prev_events, then we might
- # as well remove it now because it won't ever have anything else
- # to backfill from.
- or len(ev.prev_event_ids()) == 0
- ],
+ backward_extremity_tuples_to_remove,
+ )
+
+ # Clear out the failed backfill attempts after we successfully pulled
+ # the event. Since we no longer need these events as backward
+ # extremities, it also means that they won't be backfilled from again so
+ # we no longer need to store the backfill attempts around it.
+ query = """
+ DELETE FROM event_failed_pull_attempts
+ WHERE event_id = ? and room_id = ?
+ """
+ txn.execute_batch(
+ query,
+ backward_extremity_tuples_to_remove,
)
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 6e8aeed7b4..9e31798ab1 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -1435,16 +1435,16 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
),
)
- endpoint = None
row = txn.fetchone()
if row:
endpoint = row[0]
+ else:
+ # if the query didn't return a row, we must be almost done. We just
+ # need to go up to the recorded max_stream_ordering.
+ endpoint = max_stream_ordering_inclusive
- where_clause = "stream_ordering > ?"
- args = [min_stream_ordering_exclusive]
- if endpoint:
- where_clause += " AND stream_ordering <= ?"
- args.append(endpoint)
+ where_clause = "stream_ordering > ? AND stream_ordering <= ?"
+ args = [min_stream_ordering_exclusive, endpoint]
# now do the updates.
txn.execute(
@@ -1458,13 +1458,13 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
)
logger.info(
- "populated new `events` columns up to %s/%i: updated %i rows",
+ "populated new `events` columns up to %i/%i: updated %i rows",
endpoint,
max_stream_ordering_inclusive,
txn.rowcount,
)
- if endpoint is None:
+ if endpoint >= max_stream_ordering_inclusive:
# we're done
return True
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 29c99c6357..01e935edef 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -54,11 +54,11 @@ from synapse.logging.context import (
current_context,
make_deferred_yieldable,
)
+from synapse.logging.opentracing import start_active_span, tag_args, trace
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import BackfillStream
from synapse.replication.tcp.streams.events import EventsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@@ -80,6 +80,8 @@ from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import AsyncLruCache
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
@@ -210,26 +212,35 @@ class EventsWorkerStore(SQLBaseStore):
# `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
# updated over replication. (Multiple writers are not supported for
# SQLite).
- if hs.get_instance_name() in hs.config.worker.writers.events:
- self._stream_id_gen = StreamIdGenerator(
- db_conn,
- "events",
- "stream_ordering",
- )
- self._backfill_id_gen = StreamIdGenerator(
- db_conn,
- "events",
- "stream_ordering",
- step=-1,
- extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
- )
- else:
- self._stream_id_gen = SlavedIdTracker(
- db_conn, "events", "stream_ordering"
- )
- self._backfill_id_gen = SlavedIdTracker(
- db_conn, "events", "stream_ordering", step=-1
- )
+ self._stream_id_gen = StreamIdGenerator(
+ db_conn,
+ "events",
+ "stream_ordering",
+ is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
+ )
+ self._backfill_id_gen = StreamIdGenerator(
+ db_conn,
+ "events",
+ "stream_ordering",
+ step=-1,
+ extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
+ is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
+ )
+
+ events_max = self._stream_id_gen.get_current_token()
+ curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
+ db_conn,
+ "current_state_delta_stream",
+ entity_column="room_id",
+ stream_column="stream_id",
+ max_value=events_max, # As we share the stream id with events token
+ limit=1000,
+ )
+ self._curr_state_delta_stream_cache: StreamChangeCache = StreamChangeCache(
+ "_curr_state_delta_stream_cache",
+ min_curr_state_delta_id,
+ prefilled_cache=curr_state_delta_prefill,
+ )
if hs.config.worker.run_background_tasks:
# We periodically clean out old transaction ID mappings
@@ -338,6 +349,7 @@ class EventsWorkerStore(SQLBaseStore):
) -> Optional[EventBase]:
...
+ @cancellable
async def get_event(
self,
event_id: str,
@@ -371,7 +383,7 @@ class EventsWorkerStore(SQLBaseStore):
If there is a mismatch, behave as per allow_none.
Returns:
- The event, or None if the event was not found.
+ The event, or None if the event was not found and allow_none is `True`.
"""
if not isinstance(event_id, str):
raise TypeError("Invalid event event_id %r" % (event_id,))
@@ -430,6 +442,9 @@ class EventsWorkerStore(SQLBaseStore):
return {e.event_id: e for e in events}
+ @trace
+ @tag_args
+ @cancellable
async def get_events_as_list(
self,
event_ids: Collection[str],
@@ -468,7 +483,7 @@ class EventsWorkerStore(SQLBaseStore):
return []
# there may be duplicates so we cast the list to a set
- event_entry_map = await self._get_events_from_cache_or_db(
+ event_entry_map = await self.get_unredacted_events_from_cache_or_db(
set(event_ids), allow_rejected=allow_rejected
)
@@ -503,7 +518,9 @@ class EventsWorkerStore(SQLBaseStore):
continue
redacted_event_id = entry.event.redacts
- event_map = await self._get_events_from_cache_or_db([redacted_event_id])
+ event_map = await self.get_unredacted_events_from_cache_or_db(
+ [redacted_event_id]
+ )
original_event_entry = event_map.get(redacted_event_id)
if not original_event_entry:
# we don't have the redacted event (or it was rejected).
@@ -581,11 +598,17 @@ class EventsWorkerStore(SQLBaseStore):
return events
- async def _get_events_from_cache_or_db(
- self, event_ids: Iterable[str], allow_rejected: bool = False
+ @cancellable
+ async def get_unredacted_events_from_cache_or_db(
+ self,
+ event_ids: Iterable[str],
+ allow_rejected: bool = False,
) -> Dict[str, EventCacheEntry]:
"""Fetch a bunch of events from the cache or the database.
+ Note that the events pulled by this function will not have any redactions
+ applied, and no guarantee is made about the ordering of the events returned.
+
If events are pulled from the database, they will be cached for future lookups.
Unknown events are omitted from the response.
@@ -600,7 +623,11 @@ class EventsWorkerStore(SQLBaseStore):
Returns:
map from event id to result
"""
- event_entry_map = await self._get_events_from_cache(
+        # Shortcut: check the *in-memory* cache first. This function may be called
+        # repeatedly for the same event, so for performance reasons we do not reach
+        # out to any external cache at this point. The external cache is checked
+        # later on in the `get_missing_events_from_cache_or_db` function below.
+ event_entry_map = self._get_events_from_local_cache(
event_ids,
)
@@ -632,7 +659,9 @@ class EventsWorkerStore(SQLBaseStore):
if missing_events_ids:
- async def get_missing_events_from_db() -> Dict[str, EventCacheEntry]:
+ async def get_missing_events_from_cache_or_db() -> Dict[
+ str, EventCacheEntry
+ ]:
"""Fetches the events in `missing_event_ids` from the database.
Also creates entries in `self._current_event_fetches` to allow
@@ -657,10 +686,18 @@ class EventsWorkerStore(SQLBaseStore):
# the events have been redacted, and if so pulling the redaction event
# out of the database to check it.
#
+ missing_events = {}
try:
- missing_events = await self._get_events_from_db(
+ # Try to fetch from any external cache. We already checked the
+ # in-memory cache above.
+ missing_events = await self._get_events_from_external_cache(
missing_events_ids,
)
+ # Now actually fetch any remaining events from the DB
+ db_missing_events = await self._get_events_from_db(
+ missing_events_ids - missing_events.keys(),
+ )
+ missing_events.update(db_missing_events)
except Exception as e:
with PreserveLoggingContext():
fetching_deferred.errback(e)
@@ -679,7 +716,7 @@ class EventsWorkerStore(SQLBaseStore):
# cancellations, since multiple `_get_events_from_cache_or_db` calls can
# reuse the same fetch.
missing_events: Dict[str, EventCacheEntry] = await delay_cancellation(
- get_missing_events_from_db()
+ get_missing_events_from_cache_or_db()
)
event_entry_map.update(missing_events)
@@ -754,7 +791,54 @@ class EventsWorkerStore(SQLBaseStore):
async def _get_events_from_cache(
self, events: Iterable[str], update_metrics: bool = True
) -> Dict[str, EventCacheEntry]:
- """Fetch events from the caches.
+ """Fetch events from the caches, both in memory and any external.
+
+ May return rejected events.
+
+ Args:
+ events: list of event_ids to fetch
+ update_metrics: Whether to update the cache hit ratio metrics
+ """
+ event_map = self._get_events_from_local_cache(
+ events, update_metrics=update_metrics
+ )
+
+ missing_event_ids = (e for e in events if e not in event_map)
+ event_map.update(
+ await self._get_events_from_external_cache(
+ events=missing_event_ids,
+ update_metrics=update_metrics,
+ )
+ )
+
+ return event_map
+
+ async def _get_events_from_external_cache(
+ self, events: Iterable[str], update_metrics: bool = True
+ ) -> Dict[str, EventCacheEntry]:
+ """Fetch events from any configured external cache.
+
+ May return rejected events.
+
+ Args:
+ events: list of event_ids to fetch
+ update_metrics: Whether to update the cache hit ratio metrics
+ """
+ event_map = {}
+
+ for event_id in events:
+ ret = await self._get_event_cache.get_external(
+ (event_id,), None, update_metrics=update_metrics
+ )
+ if ret:
+ event_map[event_id] = ret
+
+ return event_map
+
+ def _get_events_from_local_cache(
+ self, events: Iterable[str], update_metrics: bool = True
+ ) -> Dict[str, EventCacheEntry]:
+ """Fetch events from the local, in memory, caches.
May return rejected events.
@@ -766,7 +850,7 @@ class EventsWorkerStore(SQLBaseStore):
for event_id in events:
# First check if it's in the event cache
- ret = await self._get_event_cache.get(
+ ret = self._get_event_cache.get_local(
(event_id,), None, update_metrics=update_metrics
)
if ret:
@@ -788,7 +872,7 @@ class EventsWorkerStore(SQLBaseStore):
# We add the entry back into the cache as we want to keep
# recently queried events in the cache.
- await self._get_event_cache.set((event_id,), cache_entry)
+ self._get_event_cache.set_local((event_id,), cache_entry)
return event_map
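Taken together, the split into `_get_events_from_local_cache` and `_get_events_from_external_cache`, plus the existing database path, gives a three-tier lookup. A rough sketch of the order (the method names are from this diff; the wrapper itself is illustrative and not part of the patch):

```python
# Illustrative wrapper only.
async def fetch_events_sketch(store, event_ids):
    # 1. In-memory cache: synchronous and cheapest, so always checked first.
    found = store._get_events_from_local_cache(event_ids)

    # 2. Any configured external cache (e.g. Redis), for events not found locally.
    missing = [e for e in event_ids if e not in found]
    found.update(await store._get_events_from_external_cache(missing))

    # 3. Finally the database, for whatever is still missing.
    still_missing = [e for e in event_ids if e not in found]
    found.update(await store._get_events_from_db(still_missing))
    return found
```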
@@ -1029,23 +1113,42 @@ class EventsWorkerStore(SQLBaseStore):
"""
fetched_event_ids: Set[str] = set()
fetched_events: Dict[str, _EventRow] = {}
- events_to_fetch = event_ids
- while events_to_fetch:
- row_map = await self._enqueue_events(events_to_fetch)
+ async def _fetch_event_ids_and_get_outstanding_redactions(
+ event_ids_to_fetch: Collection[str],
+ ) -> Collection[str]:
+ """
+ Fetch all of the given event_ids and return any associated redaction event_ids
+ that we still need to fetch in the next iteration.
+ """
+ row_map = await self._enqueue_events(event_ids_to_fetch)
# we need to recursively fetch any redactions of those events
redaction_ids: Set[str] = set()
- for event_id in events_to_fetch:
+ for event_id in event_ids_to_fetch:
row = row_map.get(event_id)
fetched_event_ids.add(event_id)
if row:
fetched_events[event_id] = row
redaction_ids.update(row.redactions)
- events_to_fetch = redaction_ids.difference(fetched_event_ids)
- if events_to_fetch:
- logger.debug("Also fetching redaction events %s", events_to_fetch)
+ event_ids_to_fetch = redaction_ids.difference(fetched_event_ids)
+ return event_ids_to_fetch
+
+ # Grab the initial list of events requested
+ event_ids_to_fetch = await _fetch_event_ids_and_get_outstanding_redactions(
+ event_ids
+ )
+ # Then go and recursively find all of the associated redactions
+ with start_active_span("recursively fetching redactions"):
+ while event_ids_to_fetch:
+ logger.debug("Also fetching redaction events %s", event_ids_to_fetch)
+
+ event_ids_to_fetch = (
+ await _fetch_event_ids_and_get_outstanding_redactions(
+ event_ids_to_fetch
+ )
+ )
# build a map from event_id to EventBase
event_map: Dict[str, EventBase] = {}
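Why the loop above may run more than once (a toy illustration with made-up event IDs): fetching an event can reveal redaction events targeting it, and those redaction events may themselves have been redacted, so each pass requests only the redaction events not already fetched.

```python
from typing import Dict, Set

# Toy data: event_id -> the redaction events that target it (if any).
redactions_of: Dict[str, Set[str]] = {"$A": {"$B"}, "$B": {"$C"}}

fetched: Set[str] = set()
to_fetch: Set[str] = {"$A"}
while to_fetch:
    newly_referenced: Set[str] = set()
    for event_id in to_fetch:
        newly_referenced |= redactions_of.get(event_id, set())
    fetched |= to_fetch
    # Only chase redaction events we have not already fetched, exactly as the
    # difference() call in the hunk above does.
    to_fetch = newly_referenced - fetched

assert fetched == {"$A", "$B", "$C"}
```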
@@ -1073,7 +1176,7 @@ class EventsWorkerStore(SQLBaseStore):
if format_version is None:
# This means that we stored the event before we had the concept
# of a event format version, so it must be a V1 event.
- format_version = EventFormatVersions.V1
+ format_version = EventFormatVersions.ROOM_V1_V2
room_version_id = row.room_version_id
@@ -1103,10 +1206,10 @@ class EventsWorkerStore(SQLBaseStore):
#
# So, the following approximations should be adequate.
- if format_version == EventFormatVersions.V1:
+ if format_version == EventFormatVersions.ROOM_V1_V2:
# if it's event format v1 then it must be room v1 or v2
room_version = RoomVersions.V1
- elif format_version == EventFormatVersions.V2:
+ elif format_version == EventFormatVersions.ROOM_V3:
# if it's event format v2 then it must be room v3
room_version = RoomVersions.V3
else:
@@ -1363,6 +1466,8 @@ class EventsWorkerStore(SQLBaseStore):
return {r["event_id"] for r in rows}
+ @trace
+ @tag_args
async def have_seen_events(
self, room_id: str, event_ids: Iterable[str]
) -> Set[str]:
@@ -1385,36 +1490,36 @@ class EventsWorkerStore(SQLBaseStore):
# the batches as big as possible.
results: Set[str] = set()
- for chunk in batch_iter(event_ids, 500):
- r = await self._have_seen_events_dict(
- [(room_id, event_id) for event_id in chunk]
+ for event_ids_chunk in batch_iter(event_ids, 500):
+ events_seen_dict = await self._have_seen_events_dict(
+ room_id, event_ids_chunk
+ )
+ results.update(
+ eid for (eid, have_event) in events_seen_dict.items() if have_event
)
- results.update(eid for ((_rid, eid), have_event) in r.items() if have_event)
return results
- @cachedList(cached_method_name="have_seen_event", list_name="keys")
+ @cachedList(cached_method_name="have_seen_event", list_name="event_ids")
async def _have_seen_events_dict(
- self, keys: Collection[Tuple[str, str]]
- ) -> Dict[Tuple[str, str], bool]:
+ self,
+ room_id: str,
+ event_ids: Collection[str],
+ ) -> Dict[str, bool]:
"""Helper for have_seen_events
Returns:
- a dict {(room_id, event_id)-> bool}
+ a dict {event_id -> bool}
"""
- # if the event cache contains the event, obviously we've seen it.
-
- cache_results = {
- (rid, eid)
- for (rid, eid) in keys
- if await self._get_event_cache.contains((eid,))
- }
- results = dict.fromkeys(cache_results, True)
- remaining = [k for k in keys if k not in cache_results]
- if not remaining:
- return results
-
- def have_seen_events_txn(txn: LoggingTransaction) -> None:
+ # TODO: We used to query the _get_event_cache here as a fast-path before
+ # hitting the database. For if an event were in the cache, we've presumably
+        # hitting the database, since an event found in the cache has presumably
+        # been seen before.
+ # But this is currently an invalid assumption due to the _get_event_cache
+ # not being invalidated when purging events from a room. The optimisation can
+ # be re-added after https://github.com/matrix-org/synapse/issues/13476
+
+ def have_seen_events_txn(txn: LoggingTransaction) -> Dict[str, bool]:
# we deliberately do *not* query the database for room_id, to make the
# query an index-only lookup on `events_event_id_key`.
#
@@ -1422,23 +1527,22 @@ class EventsWorkerStore(SQLBaseStore):
sql = "SELECT event_id FROM events AS e WHERE "
clause, args = make_in_list_sql_clause(
- txn.database_engine, "e.event_id", [eid for (_rid, eid) in remaining]
+ txn.database_engine, "e.event_id", event_ids
)
txn.execute(sql + clause, args)
found_events = {eid for eid, in txn}
# ... and then we can update the results for each key
- results.update(
- {(rid, eid): (eid in found_events) for (rid, eid) in remaining}
- )
+ return {eid: (eid in found_events) for eid in event_ids}
- await self.db_pool.runInteraction("have_seen_events", have_seen_events_txn)
- return results
+ return await self.db_pool.runInteraction(
+ "have_seen_events", have_seen_events_txn
+ )
@cached(max_entries=100000, tree=True)
async def have_seen_event(self, room_id: str, event_id: str) -> bool:
- res = await self._have_seen_events_dict(((room_id, event_id),))
- return res[(room_id, event_id)]
+ res = await self._have_seen_events_dict(room_id, [event_id])
+ return res[event_id]
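`have_seen_events` now chunks the incoming IDs and merges the per-chunk dicts keyed on event ID alone. A simplified sketch of that chunk-and-merge flow, using an in-memory set in place of the `events` table and a hand-rolled batching helper rather than Synapse's own `batch_iter`:

```python
# Sketch of the chunk-and-merge pattern used by `have_seen_events`, with an
# in-memory set standing in for the `events` table.
from typing import Dict, Iterable, Iterator, List, Set, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[List[T]]:
    batch: List[T] = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch


def have_seen_events(known: Set[str], event_ids: Iterable[str]) -> Set[str]:
    results: Set[str] = set()
    for chunk in batch_iter(event_ids, 500):
        # The real code asks the database which of the chunk's IDs exist.
        seen: Dict[str, bool] = {eid: eid in known for eid in chunk}
        results.update(eid for eid, have in seen.items() if have)
    return results


print(have_seen_events({"$a", "$b"}, ["$a", "$c"]))  # {'$a'}
```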
def _get_current_state_event_counts_txn(
self, txn: LoggingTransaction, room_id: str
@@ -1478,7 +1582,7 @@ class EventsWorkerStore(SQLBaseStore):
room_id: The room ID to query.
Returns:
- dict[str:float] of complexity version to complexity.
+ Map of complexity version to complexity.
"""
state_events = await self.get_current_state_event_counts(room_id)
@@ -1876,12 +1980,17 @@ class EventsWorkerStore(SQLBaseStore):
Args:
room_id: room where the event lives
- event_id: event to check
+ event: event to check (can't be an `outlier`)
Returns:
Boolean indicating whether it's an extremity
"""
+ assert not event.internal_metadata.is_outlier(), (
+ "is_event_next_to_backward_gap(...) can't be used with `outlier` events. "
+ "This function relies on `event_backward_extremities` which won't be filled in for `outliers`."
+ )
+
def is_event_next_to_backward_gap_txn(txn: LoggingTransaction) -> bool:
# If the event in question has any of its prev_events listed as a
# backward extremity, it's next to a gap.
@@ -1931,12 +2040,17 @@ class EventsWorkerStore(SQLBaseStore):
Args:
room_id: room where the event lives
- event_id: event to check
+ event: event to check (can't be an `outlier`)
Returns:
Boolean indicating whether it's an extremity
"""
+ assert not event.internal_metadata.is_outlier(), (
+ "is_event_next_to_forward_gap(...) can't be used with `outlier` events. "
+ "This function relies on `event_edges` and `event_forward_extremities` which won't be filled in for `outliers`."
+ )
+
def is_event_next_to_gap_txn(txn: LoggingTransaction) -> bool:
# If the event in question is a forward extremity, we will just
# consider any potential forward gap as not a gap since it's one of
@@ -2017,35 +2131,50 @@ class EventsWorkerStore(SQLBaseStore):
The closest event_id otherwise None if we can't find any event in
the given direction.
"""
+ if direction == "b":
+ # Find closest event *before* a given timestamp. We use descending
+ # (which gives values largest to smallest) because we want the
+ # largest possible timestamp *before* the given timestamp.
+ comparison_operator = "<="
+ order = "DESC"
+ else:
+ # Find closest event *after* a given timestamp. We use ascending
+ # (which gives values smallest to largest) because we want the
+ # closest possible timestamp *after* the given timestamp.
+ comparison_operator = ">="
+ order = "ASC"
- sql_template = """
+ sql_template = f"""
SELECT event_id FROM events
LEFT JOIN rejections USING (event_id)
WHERE
- origin_server_ts %s ?
- AND room_id = ?
+ room_id = ?
+ AND origin_server_ts {comparison_operator} ?
+ /**
+ * Make sure the event isn't an `outlier` because we have no way
+ * to later check whether it's next to a gap. `outliers` do not
+ * have entries in the `event_edges`, `event_forward_extremities`,
+ * and `event_backward_extremities` tables to check against
+ * (used by `is_event_next_to_backward_gap` and `is_event_next_to_forward_gap`).
+ */
+ AND NOT outlier
/* Make sure event is not rejected */
AND rejections.event_id IS NULL
- ORDER BY origin_server_ts %s
+ /**
+ * First sort by the message timestamp. If the message timestamps are the
+ * same, we want the message that logically comes "next" (before/after
+ * the given timestamp) based on the DAG and its topological order (`depth`).
+ * Finally, we can tie-break based on when it was received on the server
+ * (`stream_ordering`).
+ */
+ ORDER BY origin_server_ts {order}, depth {order}, stream_ordering {order}
LIMIT 1;
"""
def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]:
- if direction == "b":
- # Find closest event *before* a given timestamp. We use descending
- # (which gives values largest to smallest) because we want the
- # largest possible timestamp *before* the given timestamp.
- comparison_operator = "<="
- order = "DESC"
- else:
- # Find closest event *after* a given timestamp. We use ascending
- # (which gives values smallest to largest) because we want the
- # closest possible timestamp *after* the given timestamp.
- comparison_operator = ">="
- order = "ASC"
-
txn.execute(
- sql_template % (comparison_operator, order), (timestamp, room_id)
+ sql_template,
+ (room_id, timestamp),
)
row = txn.fetchone()
if row:
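The direction argument picks both the comparison operator and the sort order used by the query above. A small sketch of how the two directions yield the two query shapes (column names mirror the template above; this is illustrative only):

```python
# Sketch of how the direction selects both the comparison operator and the
# sort order in the timestamp-to-event query.
def build_timestamp_query(direction: str) -> str:
    if direction == "b":
        # Closest event at or *before* the timestamp: largest timestamp first.
        comparison_operator, order = "<=", "DESC"
    else:
        # Closest event at or *after* the timestamp: smallest timestamp first.
        comparison_operator, order = ">=", "ASC"
    return f"""
        SELECT event_id FROM events
        WHERE room_id = ?
          AND origin_server_ts {comparison_operator} ?
          AND NOT outlier
        ORDER BY origin_server_ts {order}, depth {order}, stream_ordering {order}
        LIMIT 1
    """


print(build_timestamp_query("b"))
print(build_timestamp_query("f"))
```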
@@ -2099,7 +2228,15 @@ class EventsWorkerStore(SQLBaseStore):
return result is not None
async def get_partial_state_events_batch(self, room_id: str) -> List[str]:
- """Get a list of events in the given room that have partial state"""
+ """
+ Get a list of events in the given room that:
+ - have partial state; and
+ - are ready to be resynced (because they have no prev_events that are
+ partial-stated)
+
+ See the docstring on `_get_partial_state_events_batch_txn` for more
+ information.
+ """
return await self.db_pool.runInteraction(
"get_partial_state_events_batch",
self._get_partial_state_events_batch_txn,
@@ -2139,3 +2276,63 @@ class EventsWorkerStore(SQLBaseStore):
(room_id,),
)
return [row[0] for row in txn]
+
+ def mark_event_rejected_txn(
+ self,
+ txn: LoggingTransaction,
+ event_id: str,
+ rejection_reason: Optional[str],
+ ) -> None:
+ """Mark an event that was previously accepted as rejected, or vice versa
+
+ This can happen, for example, when resyncing state during a faster join.
+
+ Args:
+ txn:
+ event_id: ID of event to update
+ rejection_reason: reason it has been rejected, or None if it is now accepted
+ """
+ if rejection_reason is None:
+ logger.info(
+ "Marking previously-processed event %s as accepted",
+ event_id,
+ )
+ self.db_pool.simple_delete_txn(
+ txn,
+ "rejections",
+ keyvalues={"event_id": event_id},
+ )
+ else:
+ logger.info(
+ "Marking previously-processed event %s as rejected(%s)",
+ event_id,
+ rejection_reason,
+ )
+ self.db_pool.simple_upsert_txn(
+ txn,
+ table="rejections",
+ keyvalues={"event_id": event_id},
+ values={
+ "reason": rejection_reason,
+ "last_check": self._clock.time_msec(),
+ },
+ )
+ self.db_pool.simple_update_txn(
+ txn,
+ table="events",
+ keyvalues={"event_id": event_id},
+ updatevalues={"rejection_reason": rejection_reason},
+ )
+
+ self.invalidate_get_event_cache_after_txn(txn, event_id)
+
+ # TODO(faster_joins): invalidate the cache on workers. Ideally we'd just
+ # call '_send_invalidation_to_replication', but we actually need the other
+ # end to call _invalidate_local_get_event_cache() rather than (just)
+ # _get_event_cache.invalidate().
+ #
+ # One solution might be to (somehow) get the workers to call
+ # _invalidate_caches_for_event() (though that will invalidate more than
+ # strictly necessary).
+ #
+ # https://github.com/matrix-org/synapse/issues/12994
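For illustration, here is a standalone sketch of the accept/reject toggle that `mark_event_rejected_txn` performs, run against an in-memory sqlite3 database with heavily simplified table shapes (the real schema has more columns and the real code goes through the database pool helpers):

```python
# Standalone sketch of the accept/reject toggle: accepting deletes the
# rejection row; rejecting upserts it; both mirror the state onto
# `events.rejection_reason`.
import sqlite3
import time
from typing import Optional

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE events (event_id TEXT PRIMARY KEY, rejection_reason TEXT);
    CREATE TABLE rejections (event_id TEXT PRIMARY KEY, reason TEXT, last_check INTEGER);
    INSERT INTO events VALUES ('$ev', NULL);
    """
)


def mark_event_rejected(event_id: str, rejection_reason: Optional[str]) -> None:
    if rejection_reason is None:
        conn.execute("DELETE FROM rejections WHERE event_id = ?", (event_id,))
    else:
        conn.execute(
            "INSERT INTO rejections VALUES (?, ?, ?) "
            "ON CONFLICT (event_id) DO UPDATE SET reason = excluded.reason, "
            "last_check = excluded.last_check",
            (event_id, rejection_reason, int(time.time() * 1000)),
        )
    conn.execute(
        "UPDATE events SET rejection_reason = ? WHERE event_id = ?",
        (rejection_reason, event_id),
    )


mark_event_rejected("$ev", "auth_error")  # now rejected
mark_event_rejected("$ev", None)          # accepted again
```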
diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py
index cb9ee08fa8..12f3b601f1 100644
--- a/synapse/storage/databases/main/filtering.py
+++ b/synapse/storage/databases/main/filtering.py
@@ -24,7 +24,7 @@ from synapse.types import JsonDict
from synapse.util.caches.descriptors import cached
-class FilteringStore(SQLBaseStore):
+class FilteringWorkerStore(SQLBaseStore):
@cached(num_args=2)
async def get_user_filter(
self, user_localpart: str, filter_id: Union[int, str]
@@ -46,6 +46,8 @@ class FilteringStore(SQLBaseStore):
return db_to_json(def_json)
+
+class FilteringStore(FilteringWorkerStore):
async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int:
def_json = encode_canonical_json(user_filter)
diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py
index 2d7633fbd5..7270ef09da 100644
--- a/synapse/storage/databases/main/lock.py
+++ b/synapse/storage/databases/main/lock.py
@@ -129,91 +129,48 @@ class LockStore(SQLBaseStore):
now = self._clock.time_msec()
token = random_string(6)
- if self.db_pool.engine.can_native_upsert:
-
- def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
- # We take out the lock if either a) there is no row for the lock
- # already, b) the existing row has timed out, or c) the row is
- # for this instance (which means the process got killed and
- # restarted)
- sql = """
- INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
- VALUES (?, ?, ?, ?, ?)
- ON CONFLICT (lock_name, lock_key)
- DO UPDATE
- SET
- token = EXCLUDED.token,
- instance_name = EXCLUDED.instance_name,
- last_renewed_ts = EXCLUDED.last_renewed_ts
- WHERE
- worker_locks.last_renewed_ts < ?
- OR worker_locks.instance_name = EXCLUDED.instance_name
- """
- txn.execute(
- sql,
- (
- lock_name,
- lock_key,
- self._instance_name,
- token,
- now,
- now - _LOCK_TIMEOUT_MS,
- ),
- )
-
- # We only acquired the lock if we inserted or updated the table.
- return bool(txn.rowcount)
-
- did_lock = await self.db_pool.runInteraction(
- "try_acquire_lock",
- _try_acquire_lock_txn,
- # We can autocommit here as we're executing a single query, this
- # will avoid serialization errors.
- db_autocommit=True,
+ def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
+ # We take out the lock if either a) there is no row for the lock
+ # already, b) the existing row has timed out, or c) the row is
+ # for this instance (which means the process got killed and
+ # restarted)
+ sql = """
+ INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
+ VALUES (?, ?, ?, ?, ?)
+ ON CONFLICT (lock_name, lock_key)
+ DO UPDATE
+ SET
+ token = EXCLUDED.token,
+ instance_name = EXCLUDED.instance_name,
+ last_renewed_ts = EXCLUDED.last_renewed_ts
+ WHERE
+ worker_locks.last_renewed_ts < ?
+ OR worker_locks.instance_name = EXCLUDED.instance_name
+ """
+ txn.execute(
+ sql,
+ (
+ lock_name,
+ lock_key,
+ self._instance_name,
+ token,
+ now,
+ now - _LOCK_TIMEOUT_MS,
+ ),
)
- if not did_lock:
- return None
-
- else:
- # If we're on an old SQLite we emulate the above logic by first
- # clearing out any existing stale locks and then upserting.
-
- def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool:
- sql = """
- DELETE FROM worker_locks
- WHERE
- lock_name = ?
- AND lock_key = ?
- AND (last_renewed_ts < ? OR instance_name = ?)
- """
- txn.execute(
- sql,
- (lock_name, lock_key, now - _LOCK_TIMEOUT_MS, self._instance_name),
- )
-
- inserted = self.db_pool.simple_upsert_txn_emulated(
- txn,
- table="worker_locks",
- keyvalues={
- "lock_name": lock_name,
- "lock_key": lock_key,
- },
- values={},
- insertion_values={
- "token": token,
- "last_renewed_ts": self._clock.time_msec(),
- "instance_name": self._instance_name,
- },
- )
-
- return inserted
- did_lock = await self.db_pool.runInteraction(
- "try_acquire_lock_emulated", _try_acquire_lock_emulated_txn
- )
+ # We only acquired the lock if we inserted or updated the table.
+ return bool(txn.rowcount)
- if not did_lock:
- return None
+ did_lock = await self.db_pool.runInteraction(
+ "try_acquire_lock",
+ _try_acquire_lock_txn,
+ # We can autocommit here as we're executing a single query, this
+ # will avoid serialization errors.
+ db_autocommit=True,
+ )
+ if not did_lock:
+ return None
lock = Lock(
self._reactor,
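The single code path above relies on a native `INSERT ... ON CONFLICT ... DO UPDATE ... WHERE` upsert: the update only fires when the existing row is stale or belongs to the same instance, so `rowcount` tells us whether the lock was acquired. A compact sketch of that statement against sqlite3 (which also supports native upserts), with simplified columns and an illustrative timeout value:

```python
# Compact sketch of the lock-acquisition upsert. The timeout value is
# illustrative, not the real _LOCK_TIMEOUT_MS.
import sqlite3

LOCK_TIMEOUT_MS = 2 * 60 * 1000

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE worker_locks (lock_name TEXT, lock_key TEXT, instance_name TEXT,"
    " token TEXT, last_renewed_ts INTEGER, PRIMARY KEY (lock_name, lock_key))"
)


def try_acquire(lock_name: str, lock_key: str, instance: str, token: str, now: int) -> bool:
    cur = conn.execute(
        """
        INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
        VALUES (?, ?, ?, ?, ?)
        ON CONFLICT (lock_name, lock_key)
        DO UPDATE SET
            token = excluded.token,
            instance_name = excluded.instance_name,
            last_renewed_ts = excluded.last_renewed_ts
        WHERE
            worker_locks.last_renewed_ts < ?
            OR worker_locks.instance_name = excluded.instance_name
        """,
        (lock_name, lock_key, instance, token, now, now - LOCK_TIMEOUT_MS),
    )
    # rowcount is 1 if we inserted or updated a row, 0 if another live
    # instance still holds the lock.
    return bool(cur.rowcount)


print(try_acquire("purge", "!room:a", "worker1", "abc123", 1_000_000))  # True
print(try_acquire("purge", "!room:a", "worker2", "def456", 1_001_000))  # False
```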
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index efd136a864..db9a24db5e 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -217,7 +217,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None:
"""
Args:
- reserved_users (tuple): reserved users to preserve
+ reserved_users: reserved users to preserve
"""
thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
@@ -370,8 +370,8 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
should not appear in the MAU stats).
Args:
- txn (cursor):
- user_id (str): user to add/update
+ txn:
+ user_id: user to add/update
"""
assert (
self._update_on_this_worker
@@ -401,7 +401,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
add the user to the monthly active tables
Args:
- user_id(str): the user_id to query
+ user_id: the user_id to query
"""
assert (
self._update_on_this_worker
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index f6822707e4..9213ce0b5a 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -419,6 +419,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"event_forward_extremities",
"event_push_actions",
"event_search",
+ "event_failed_pull_attempts",
"partial_state_events",
"events",
"federation_inbound_events_staging",
@@ -441,6 +442,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"e2e_room_keys",
"event_push_summary",
"pusher_throttle",
+ "insertion_events",
+ "insertion_event_extremities",
+ "insertion_event_edges",
+ "batch_events",
"room_account_data",
"room_tags",
# "rooms" happens last, to keep the foreign keys in the other tables
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 768f95d16c..12ad44dbb3 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -12,15 +12,26 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import abc
import logging
-from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Tuple, Union, cast
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Collection,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
from synapse.api.errors import StoreError
from synapse.config.homeserver import ExperimentalConfig
-from synapse.push.baserules import list_with_base_rules
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
-from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.replication.tcp.streams import PushRulesStream
+from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -39,6 +50,7 @@ from synapse.storage.util.id_generators import (
IdGenerator,
StreamIdGenerator,
)
+from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached, cachedList
@@ -50,64 +62,34 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-def _is_experimental_rule_enabled(
- rule_id: str, experimental_config: ExperimentalConfig
-) -> bool:
- """Used by `_load_rules` to filter out experimental rules when they
- have not been enabled.
- """
- if (
- rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
- and not experimental_config.msc3786_enabled
- ):
- return False
- if (
- rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
- and not experimental_config.msc3772_enabled
- ):
- return False
- return True
-
-
def _load_rules(
rawrules: List[JsonDict],
enabled_map: Dict[str, bool],
experimental_config: ExperimentalConfig,
-) -> List[JsonDict]:
- ruleslist = []
- for rawrule in rawrules:
- rule = dict(rawrule)
- rule["conditions"] = db_to_json(rawrule["conditions"])
- rule["actions"] = db_to_json(rawrule["actions"])
- rule["default"] = False
- ruleslist.append(rule)
-
- # We're going to be mutating this a lot, so copy it. We also filter out
- # any experimental default push rules that aren't enabled.
- rules = [
- rule
- for rule in list_with_base_rules(ruleslist)
- if _is_experimental_rule_enabled(rule["rule_id"], experimental_config)
- ]
+) -> FilteredPushRules:
+ """Take the DB rows returned from the DB and convert them into a full
+ `FilteredPushRules` object.
+ """
- for i, rule in enumerate(rules):
- rule_id = rule["rule_id"]
+ ruleslist = [
+ PushRule.from_db(
+ rule_id=rawrule["rule_id"],
+ priority_class=rawrule["priority_class"],
+ conditions=rawrule["conditions"],
+ actions=rawrule["actions"],
+ )
+ for rawrule in rawrules
+ ]
- if rule_id not in enabled_map:
- continue
- if rule.get("enabled", True) == bool(enabled_map[rule_id]):
- continue
+ push_rules = PushRules(ruleslist)
- # Rules are cached across users.
- rule = dict(rule)
- rule["enabled"] = bool(enabled_map[rule_id])
- rules[i] = rule
+ filtered_rules = FilteredPushRules(
+ push_rules, enabled_map, msc3664_enabled=experimental_config.msc3664_enabled
+ )
- return rules
+ return filtered_rules
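`PushRule`, `PushRules` and `FilteredPushRules` are Rust-backed types from `synapse.synapse_rust.push`, so their internals are not shown here. As a rough pure-Python analogue, the enabled-map overlay they apply looks like this (illustrative only, not the Rust API):

```python
# Pure-Python analogue of the enabled-map overlay applied on top of the
# stored rules; the real types are Rust-backed.
from dataclasses import dataclass
from typing import Dict, Iterator, List, Tuple


@dataclass(frozen=True)
class SimplePushRule:
    rule_id: str
    priority_class: int
    default_enabled: bool = True


def filtered_rules(
    rules: List[SimplePushRule], enabled_map: Dict[str, bool]
) -> Iterator[Tuple[SimplePushRule, bool]]:
    for rule in rules:
        # Per-user overrides from `push_rules_enable` win over the default.
        yield rule, enabled_map.get(rule.rule_id, rule.default_enabled)


rules = [SimplePushRule("global/override/.m.rule.suppress_notices", 5)]
print(list(filtered_rules(rules, {"global/override/.m.rule.suppress_notices": False})))
```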
-# The ABCMeta metaclass ensures that it cannot be instantiated without
-# the abstract methods being implemented.
class PushRulesWorkerStore(
ApplicationServiceWorkerStore,
PusherWorkerStore,
@@ -115,7 +97,6 @@ class PushRulesWorkerStore(
ReceiptsWorkerStore,
EventsWorkerStore,
SQLBaseStore,
- metaclass=abc.ABCMeta,
):
"""This is an abstract base class where subclasses must implement
`get_max_push_rules_stream_id` which can be called in the initializer.
@@ -129,14 +110,14 @@ class PushRulesWorkerStore(
):
super().__init__(database, db_conn, hs)
- if hs.config.worker.worker_app is None:
- self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
- db_conn, "push_rules_stream", "stream_id"
- )
- else:
- self._push_rules_stream_id_gen = SlavedIdTracker(
- db_conn, "push_rules_stream", "stream_id"
- )
+ # In the worker store this is an ID tracker which we overwrite in the non-worker
+ # class below that is used on the main process.
+ self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ db_conn,
+ "push_rules_stream",
+ "stream_id",
+ is_writer=hs.config.worker.worker_app is None,
+ )
push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
db_conn,
@@ -152,17 +133,26 @@ class PushRulesWorkerStore(
prefilled_cache=push_rules_prefill,
)
- @abc.abstractmethod
def get_max_push_rules_stream_id(self) -> int:
"""Get the position of the push rules stream.
Returns:
int
"""
- raise NotImplementedError()
+ return self._push_rules_stream_id_gen.get_current_token()
+
+ def process_replication_rows(
+ self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
+ ) -> None:
+ if stream_name == PushRulesStream.NAME:
+ self._push_rules_stream_id_gen.advance(instance_name, token)
+ for row in rows:
+ self.get_push_rules_for_user.invalidate((row.user_id,))
+ self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
+ return super().process_replication_rows(stream_name, instance_name, token, rows)
@cached(max_entries=5000)
- async def get_push_rules_for_user(self, user_id: str) -> List[JsonDict]:
+ async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules:
rows = await self.db_pool.simple_select_list(
table="push_rules",
keyvalues={"user_name": user_id},
@@ -183,7 +173,6 @@ class PushRulesWorkerStore(
return _load_rules(rows, enabled_map, self.hs.config.experimental)
- @cached(max_entries=5000)
async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]:
results = await self.db_pool.simple_select_list(
table="push_rules_enable",
@@ -216,11 +205,11 @@ class PushRulesWorkerStore(
@cachedList(cached_method_name="get_push_rules_for_user", list_name="user_ids")
async def bulk_get_push_rules(
self, user_ids: Collection[str]
- ) -> Dict[str, List[JsonDict]]:
+ ) -> Dict[str, FilteredPushRules]:
if not user_ids:
return {}
- results: Dict[str, List[JsonDict]] = {user_id: [] for user_id in user_ids}
+ raw_rules: Dict[str, List[JsonDict]] = {user_id: [] for user_id in user_ids}
rows = await self.db_pool.simple_select_many_batch(
table="push_rules",
@@ -234,20 +223,19 @@ class PushRulesWorkerStore(
rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"])))
for row in rows:
- results.setdefault(row["user_name"], []).append(row)
+ raw_rules.setdefault(row["user_name"], []).append(row)
enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids)
- for user_id, rules in results.items():
+ results: Dict[str, FilteredPushRules] = {}
+
+ for user_id, rules in raw_rules.items():
results[user_id] = _load_rules(
rules, enabled_map_by_user.get(user_id, {}), self.hs.config.experimental
)
return results
- @cachedList(
- cached_method_name="get_push_rules_enabled_for_user", list_name="user_ids"
- )
async def bulk_get_push_rules_enabled(
self, user_ids: Collection[str]
) -> Dict[str, Dict[str, bool]]:
@@ -262,6 +250,7 @@ class PushRulesWorkerStore(
iterable=user_ids,
retcols=("user_name", "rule_id", "enabled"),
desc="bulk_get_push_rules_enabled",
+ batch_size=1000,
)
for row in rows:
enabled = bool(row["enabled"])
@@ -345,8 +334,8 @@ class PushRuleStore(PushRulesWorkerStore):
user_id: str,
rule_id: str,
priority_class: int,
- conditions: List[Dict[str, str]],
- actions: List[Union[JsonDict, str]],
+ conditions: Sequence[Mapping[str, str]],
+ actions: Sequence[Union[Mapping[str, Any], str]],
before: Optional[str] = None,
after: Optional[str] = None,
) -> None:
@@ -808,7 +797,6 @@ class PushRuleStore(PushRulesWorkerStore):
self.db_pool.simple_insert_txn(txn, "push_rules_stream", values=values)
txn.call_after(self.get_push_rules_for_user.invalidate, (user_id,))
- txn.call_after(self.get_push_rules_enabled_for_user.invalidate, (user_id,))
txn.call_after(
self.push_rules_stream_cache.entity_has_changed, user_id, stream_id
)
@@ -817,7 +805,7 @@ class PushRuleStore(PushRulesWorkerStore):
return self._push_rules_stream_id_gen.get_current_token()
async def copy_push_rule_from_room_to_room(
- self, new_room_id: str, user_id: str, rule: dict
+ self, new_room_id: str, user_id: str, rule: PushRule
) -> None:
"""Copy a single push rule from one room to another for a specific user.
@@ -827,21 +815,27 @@ class PushRuleStore(PushRulesWorkerStore):
rule: A push rule.
"""
# Create new rule id
- rule_id_scope = "/".join(rule["rule_id"].split("/")[:-1])
+ rule_id_scope = "/".join(rule.rule_id.split("/")[:-1])
new_rule_id = rule_id_scope + "/" + new_room_id
+ new_conditions = []
+
# Change room id in each condition
- for condition in rule.get("conditions", []):
+ for condition in rule.conditions:
+ new_condition = condition
if condition.get("key") == "room_id":
- condition["pattern"] = new_room_id
+ new_condition = dict(condition)
+ new_condition["pattern"] = new_room_id
+
+ new_conditions.append(new_condition)
# Add the rule for the new room
await self.add_push_rule(
user_id=user_id,
rule_id=new_rule_id,
- priority_class=rule["priority_class"],
- conditions=rule["conditions"],
- actions=rule["actions"],
+ priority_class=rule.priority_class,
+ conditions=new_conditions,
+ actions=rule.actions,
)
async def copy_push_rules_from_room_to_room_for_user(
@@ -859,8 +853,11 @@ class PushRuleStore(PushRulesWorkerStore):
user_push_rules = await self.get_push_rules_for_user(user_id)
# Get rules relating to the old room and copy them to the new room
- for rule in user_push_rules:
- conditions = rule.get("conditions", [])
+ for rule, enabled in user_push_rules.rules():
+ if not enabled:
+ continue
+
+ conditions = rule.conditions
if any(
(c.get("key") == "room_id" and c.get("pattern") == old_room_id)
for c in conditions
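`copy_push_rule_from_room_to_room` now copies a condition before editing it rather than mutating it in place, since rule objects may be shared via the per-user cache. A small sketch of that copy-don't-mutate rewrite of `room_id` conditions:

```python
# Sketch of the copy-don't-mutate rewrite of `room_id` conditions shown above.
from typing import Any, Dict, List


def rewrite_room_conditions(
    conditions: List[Dict[str, Any]], new_room_id: str
) -> List[Dict[str, Any]]:
    new_conditions = []
    for condition in conditions:
        new_condition = condition
        if condition.get("key") == "room_id":
            # Copy before editing: the original rule may be shared via a cache.
            new_condition = dict(condition)
            new_condition["pattern"] = new_room_id
        new_conditions.append(new_condition)
    return new_conditions


old = [{"kind": "event_match", "key": "room_id", "pattern": "!old:example.org"}]
print(rewrite_room_conditions(old, "!new:example.org"))
print(old)  # the original conditions are unchanged
```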
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index bd0cfa7f32..fee37b9ce4 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -27,13 +27,18 @@ from typing import (
)
from synapse.push import PusherConfig, ThrottleParams
+from synapse.replication.tcp.streams import PushersStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
-from synapse.storage.util.id_generators import StreamIdGenerator
+from synapse.storage.util.id_generators import (
+ AbstractStreamIdGenerator,
+ AbstractStreamIdTracker,
+ StreamIdGenerator,
+)
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -52,8 +57,15 @@ class PusherWorkerStore(SQLBaseStore):
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
- self._pushers_id_gen = StreamIdGenerator(
- db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
+
+ # In the worker store this is an ID tracker which we overwrite in the non-worker
+ # class below that is used on the main process.
+ self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ db_conn,
+ "pushers",
+ "id",
+ extra_tables=[("deleted_pushers", "stream_id")],
+ is_writer=hs.config.worker.worker_app is None,
)
self.db_pool.updates.register_background_update_handler(
@@ -89,8 +101,23 @@ class PusherWorkerStore(SQLBaseStore):
)
continue
+ # If we're using SQLite, then boolean values are integers. This is
+ # troublesome since some code using the return value of this method might
+ # expect it to be a boolean, or will expose it to clients (in responses).
+ r["enabled"] = bool(r["enabled"])
+
yield PusherConfig(**r)
+ def get_pushers_stream_token(self) -> int:
+ return self._pushers_id_gen.get_current_token()
+
+ def process_replication_rows(
+ self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
+ ) -> None:
+ if stream_name == PushersStream.NAME:
+ self._pushers_id_gen.advance(instance_name, token)
+ return super().process_replication_rows(stream_name, instance_name, token, rows)
+
async def get_pushers_by_app_id_and_pushkey(
self, app_id: str, pushkey: str
) -> Iterator[PusherConfig]:
@@ -100,38 +127,52 @@ class PusherWorkerStore(SQLBaseStore):
return await self.get_pushers_by({"user_name": user_id})
async def get_pushers_by(self, keyvalues: Dict[str, Any]) -> Iterator[PusherConfig]:
- ret = await self.db_pool.simple_select_list(
- "pushers",
- keyvalues,
- [
- "id",
- "user_name",
- "access_token",
- "profile_tag",
- "kind",
- "app_id",
- "app_display_name",
- "device_display_name",
- "pushkey",
- "ts",
- "lang",
- "data",
- "last_stream_ordering",
- "last_success",
- "failing_since",
- ],
+ """Retrieve pushers that match the given criteria.
+
+ Args:
+ keyvalues: A {column: value} dictionary.
+
+ Returns:
+ The pushers for which the given columns have the given values.
+ """
+
+ def get_pushers_by_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:
+ # We could technically use simple_select_list here, but we need to call
+ # COALESCE on the 'enabled' column. While it is technically possible to give
+ # simple_select_list the whole `COALESCE(...) AS ...` as a column name, it
+ # feels a bit hacky, so it's probably better to just inline the query.
+ sql = """
+ SELECT
+ id, user_name, access_token, profile_tag, kind, app_id,
+ app_display_name, device_display_name, pushkey, ts, lang, data,
+ last_stream_ordering, last_success, failing_since,
+ COALESCE(enabled, TRUE) AS enabled, device_id
+ FROM pushers
+ """
+
+ sql += "WHERE %s" % (" AND ".join("%s = ?" % (k,) for k in keyvalues),)
+
+ txn.execute(sql, list(keyvalues.values()))
+
+ return self.db_pool.cursor_to_dict(txn)
+
+ ret = await self.db_pool.runInteraction(
desc="get_pushers_by",
+ func=get_pushers_by_txn,
)
+
return self._decode_pushers_rows(ret)
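The inlined query exists so the nullable `enabled` column can be wrapped in `COALESCE(enabled, TRUE)`; the `WHERE` clause is then assembled from the caller's `{column: value}` dictionary. A minimal sketch of that assembly (column names come from trusted callers; values are always bound as parameters):

```python
# Minimal sketch of assembling the WHERE clause from a {column: value} dict,
# as `get_pushers_by` does above.
from typing import Any, Dict, List, Tuple


def build_where(keyvalues: Dict[str, Any]) -> Tuple[str, List[Any]]:
    clause = " AND ".join(f"{column} = ?" for column in keyvalues)
    return clause, list(keyvalues.values())


sql = """
    SELECT id, user_name, pushkey, COALESCE(enabled, TRUE) AS enabled
    FROM pushers
"""
clause, args = build_where({"app_id": "m.http", "pushkey": "abc"})
print(sql + " WHERE " + clause, args)
```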
- async def get_all_pushers(self) -> Iterator[PusherConfig]:
- def get_pushers(txn: LoggingTransaction) -> Iterator[PusherConfig]:
- txn.execute("SELECT * FROM pushers")
+ async def get_enabled_pushers(self) -> Iterator[PusherConfig]:
+ def get_enabled_pushers_txn(txn: LoggingTransaction) -> Iterator[PusherConfig]:
+ txn.execute("SELECT * FROM pushers WHERE COALESCE(enabled, TRUE)")
rows = self.db_pool.cursor_to_dict(txn)
return self._decode_pushers_rows(rows)
- return await self.db_pool.runInteraction("get_all_pushers", get_pushers)
+ return await self.db_pool.runInteraction(
+ "get_enabled_pushers", get_enabled_pushers_txn
+ )
async def get_all_updated_pushers_rows(
self, instance_name: str, last_id: int, current_id: int, limit: int
@@ -458,9 +499,77 @@ class PusherWorkerStore(SQLBaseStore):
return number_deleted
-class PusherStore(PusherWorkerStore):
- def get_pushers_stream_token(self) -> int:
- return self._pushers_id_gen.get_current_token()
+class PusherBackgroundUpdatesStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ self.db_pool.updates.register_background_update_handler(
+ "set_device_id_for_pushers", self._set_device_id_for_pushers
+ )
+
+ async def _set_device_id_for_pushers(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Background update to populate the device_id column of the pushers table."""
+ last_pusher_id = progress.get("pusher_id", 0)
+
+ def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int:
+ txn.execute(
+ """
+ SELECT p.id, at.device_id
+ FROM pushers AS p
+ INNER JOIN access_tokens AS at
+ ON p.access_token = at.id
+ WHERE
+ p.access_token IS NOT NULL
+ AND at.device_id IS NOT NULL
+ AND p.id > ?
+ ORDER BY p.id
+ LIMIT ?
+ """,
+ (last_pusher_id, batch_size),
+ )
+
+ rows = self.db_pool.cursor_to_dict(txn)
+ if len(rows) == 0:
+ return 0
+
+ self.db_pool.simple_update_many_txn(
+ txn=txn,
+ table="pushers",
+ key_names=("id",),
+ key_values=[(row["id"],) for row in rows],
+ value_names=("device_id",),
+ value_values=[(row["device_id"],) for row in rows],
+ )
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]}
+ )
+
+ return len(rows)
+
+ nb_processed = await self.db_pool.runInteraction(
+ "set_device_id_for_pushers", set_device_id_for_pushers_txn
+ )
+
+ if nb_processed < batch_size:
+ await self.db_pool.updates._end_background_update(
+ "set_device_id_for_pushers"
+ )
+
+ return nb_processed
+
+
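`_set_device_id_for_pushers` follows the usual background-update shape: process up to `batch_size` rows past the last recorded ID, persist progress, and finish once a batch comes back short. A generic sketch of that loop, with hypothetical `fetch_batch`/`save_progress` callables standing in for the database helpers:

```python
# Generic sketch of the background-update batching pattern: fetch a batch
# after the last processed key, record progress, stop when a batch is short.
from typing import Callable, List, Tuple


def run_background_update(
    fetch_batch: Callable[[int, int], List[Tuple[int, str]]],
    save_progress: Callable[[int], None],
    batch_size: int,
) -> None:
    last_id = 0
    while True:
        rows = fetch_batch(last_id, batch_size)
        if not rows:
            break
        last_id = rows[-1][0]
        save_progress(last_id)
        if len(rows) < batch_size:
            break  # final, short batch: the update is complete


# Example: pretend the table holds pusher ids 1..5.
data = [(i, f"device{i}") for i in range(1, 6)]
run_background_update(
    lambda after, limit: [r for r in data if r[0] > after][:limit],
    lambda last: print("progress:", last),
    batch_size=2,
)
```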
+class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
+ # Because we have write access, this will be a StreamIdGenerator
+ # (see PusherWorkerStore.__init__)
+ _pushers_id_gen: AbstractStreamIdGenerator
async def add_pusher(
self,
@@ -476,6 +585,8 @@ class PusherStore(PusherWorkerStore):
data: Optional[JsonDict],
last_stream_ordering: int,
profile_tag: str = "",
+ enabled: bool = True,
+ device_id: Optional[str] = None,
) -> None:
async with self._pushers_id_gen.get_next() as stream_id:
# no need to lock because `pushers` has a unique key on
@@ -494,6 +605,8 @@ class PusherStore(PusherWorkerStore):
"last_stream_ordering": last_stream_ordering,
"profile_tag": profile_tag,
"id": stream_id,
+ "enabled": enabled,
+ "device_id": device_id,
},
desc="add_pusher",
lock=False,
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 0090c9f225..a580e4bdda 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -27,7 +27,6 @@ from typing import (
)
from synapse.api.constants import EduTypes
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import ReceiptsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
@@ -61,6 +60,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
hs: "HomeServer",
):
self._instance_name = hs.get_instance_name()
+
+ # In the worker store this is an ID tracker which we overwrite in the non-worker
+ # class below that is used on the main process.
self._receipts_id_gen: AbstractStreamIdTracker
if isinstance(database.engine, PostgresEngine):
@@ -87,14 +89,12 @@ class ReceiptsWorkerStore(SQLBaseStore):
# `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
# updated over replication. (Multiple writers are not supported for
# SQLite).
- if hs.get_instance_name() in hs.config.worker.writers.receipts:
- self._receipts_id_gen = StreamIdGenerator(
- db_conn, "receipts_linearized", "stream_id"
- )
- else:
- self._receipts_id_gen = SlavedIdTracker(
- db_conn, "receipts_linearized", "stream_id"
- )
+ self._receipts_id_gen = StreamIdGenerator(
+ db_conn,
+ "receipts_linearized",
+ "stream_id",
+ is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
+ )
super().__init__(database, db_conn, hs)
@@ -117,34 +117,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
"""Get the current max stream ID for receipts stream"""
return self._receipts_id_gen.get_current_token()
- async def get_last_receipt_event_id_for_user(
- self, user_id: str, room_id: str, receipt_types: Collection[str]
- ) -> Optional[str]:
- """
- Fetch the event ID for the latest receipt in a room with one of the given receipt types.
-
- Args:
- user_id: The user to fetch receipts for.
- room_id: The room ID to fetch the receipt for.
- receipt_type: The receipt types to fetch.
-
- Returns:
- The latest receipt, if one exists.
- """
- result = await self.db_pool.runInteraction(
- "get_last_receipt_event_id_for_user",
- self.get_last_receipt_for_user_txn,
- user_id,
- room_id,
- receipt_types,
- )
- if not result:
- return None
-
- event_id, _ = result
- return event_id
-
- def get_last_receipt_for_user_txn(
+ def get_last_unthreaded_receipt_for_user_txn(
self,
txn: LoggingTransaction,
user_id: str,
@@ -152,16 +125,16 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_types: Collection[str],
) -> Optional[Tuple[str, int]]:
"""
- Fetch the event ID and stream_ordering for the latest receipt in a room
- with one of the given receipt types.
+ Fetch the event ID and stream_ordering for the latest unthreaded receipt
+ in a room with one of the given receipt types.
Args:
user_id: The user to fetch receipts for.
room_id: The room ID to fetch the receipt for.
- receipt_type: The receipt types to fetch.
+ receipt_types: The receipt types to fetch.
Returns:
- The latest receipt, if one exists.
+ The event ID and stream ordering of the latest receipt, if one exists.
"""
clause, args = make_in_list_sql_clause(
@@ -175,6 +148,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
WHERE {clause}
AND user_id = ?
AND room_id = ?
+ AND thread_id IS NULL
ORDER BY stream_ordering DESC
LIMIT 1
"""
@@ -426,6 +400,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type = event_entry.setdefault(row["receipt_type"], {})
receipt_type[row["user_id"]] = db_to_json(row["data"])
+ if row["thread_id"]:
+ receipt_type[row["user_id"]]["thread_id"] = row["thread_id"]
results = {
room_id: [results[room_id]] if room_id in results else []
@@ -522,7 +498,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
async def get_all_updated_receipts(
self, instance_name: str, last_id: int, current_id: int, limit: int
- ) -> Tuple[List[Tuple[int, list]], int, bool]:
+ ) -> Tuple[
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool
+ ]:
"""Get updates for receipts replication stream.
Args:
@@ -549,9 +527,13 @@ class ReceiptsWorkerStore(SQLBaseStore):
def get_all_updated_receipts_txn(
txn: LoggingTransaction,
- ) -> Tuple[List[Tuple[int, list]], int, bool]:
+ ) -> Tuple[
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
+ int,
+ bool,
+ ]:
sql = """
- SELECT stream_id, room_id, receipt_type, user_id, event_id, data
+ SELECT stream_id, room_id, receipt_type, user_id, event_id, thread_id, data
FROM receipts_linearized
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@@ -560,8 +542,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
txn.execute(sql, (last_id, current_id, limit))
updates = cast(
- List[Tuple[int, list]],
- [(r[0], r[1:5] + (db_to_json(r[5]),)) for r in txn],
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
+ [(r[0], r[1:6] + (db_to_json(r[6]),)) for r in txn],
)
limited = False
@@ -613,6 +595,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_id: str,
+ thread_id: Optional[str],
data: JsonDict,
stream_id: int,
) -> Optional[int]:
@@ -639,12 +622,27 @@ class ReceiptsWorkerStore(SQLBaseStore):
# We don't want to clobber receipts for more recent events, so we
# have to compare orderings of existing receipts
if stream_ordering is not None:
- sql = (
- "SELECT stream_ordering, event_id FROM events"
- " INNER JOIN receipts_linearized AS r USING (event_id, room_id)"
- " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?"
+ if thread_id is None:
+ thread_clause = "r.thread_id IS NULL"
+ thread_args: Tuple[str, ...] = ()
+ else:
+ thread_clause = "r.thread_id = ?"
+ thread_args = (thread_id,)
+
+ sql = f"""
+ SELECT stream_ordering, event_id FROM events
+ INNER JOIN receipts_linearized AS r USING (event_id, room_id)
+ WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ? AND {thread_clause}
+ """
+ txn.execute(
+ sql,
+ (
+ room_id,
+ receipt_type,
+ user_id,
+ )
+ + thread_args,
)
- txn.execute(sql, (room_id, receipt_type, user_id))
for so, eid in txn:
if int(so) >= stream_ordering:
@@ -664,22 +662,28 @@ class ReceiptsWorkerStore(SQLBaseStore):
self._receipts_stream_cache.entity_has_changed, room_id, stream_id
)
+ keyvalues = {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ where_clause = ""
+ if thread_id is None:
+ where_clause = "thread_id IS NULL"
+ else:
+ keyvalues["thread_id"] = thread_id
+
self.db_pool.simple_upsert_txn(
txn,
table="receipts_linearized",
- keyvalues={
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- },
+ keyvalues=keyvalues,
values={
"stream_id": stream_id,
"event_id": event_id,
+ "event_stream_ordering": stream_ordering,
"data": json_encoder.encode(data),
},
- # receipts_linearized has a unique constraint on
- # (user_id, room_id, receipt_type), so no need to lock
- lock=False,
+ where_clause=where_clause,
)
return rx_ts
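Unthreaded receipts store `thread_id = NULL`, and `NULL` never matches an equality key, so the NULL case is expressed as an extra `where_clause` while threaded receipts key on the column directly. A sketch of building that keyvalues/where_clause pair:

```python
# Sketch of splitting the receipt key into upsert key/values plus an extra
# WHERE clause, so unthreaded (thread_id IS NULL) and threaded receipts land
# on different rows.
from typing import Dict, Optional, Tuple


def receipt_upsert_key(
    room_id: str, receipt_type: str, user_id: str, thread_id: Optional[str]
) -> Tuple[Dict[str, str], str]:
    keyvalues = {
        "room_id": room_id,
        "receipt_type": receipt_type,
        "user_id": user_id,
    }
    where_clause = ""
    if thread_id is None:
        # NULL never matches `thread_id = ?`, so express it as a WHERE clause.
        where_clause = "thread_id IS NULL"
    else:
        keyvalues["thread_id"] = thread_id
    return keyvalues, where_clause


print(receipt_upsert_key("!r:a", "m.read", "@u:a", None))
print(receipt_upsert_key("!r:a", "m.read", "@u:a", "$thread"))
```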
@@ -728,6 +732,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_ids: List[str],
+ thread_id: Optional[str],
data: dict,
) -> Optional[Tuple[int, int]]:
"""Insert a receipt, either from local client or remote server.
@@ -760,6 +765,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type,
user_id,
linearized_event_id,
+ thread_id,
data,
stream_id=stream_id,
# Read committed is actually beneficial here because we check for a receipt with
@@ -774,7 +780,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
now = self._clock.time_msec()
logger.debug(
- "RR for event %s in %s (%i ms old)",
+ "Receipt %s for event %s in %s (%i ms old)",
+ receipt_type,
linearized_event_id,
room_id,
now - event_ts,
@@ -787,6 +794,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type,
user_id,
event_ids,
+ thread_id,
data,
)
@@ -801,6 +809,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_ids: List[str],
+ thread_id: Optional[str],
data: JsonDict,
) -> None:
assert self._can_write_to_receipts
@@ -812,27 +821,246 @@ class ReceiptsWorkerStore(SQLBaseStore):
# FIXME: This shouldn't invalidate the whole cache
txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))
- self.db_pool.simple_delete_txn(
- txn,
- table="receipts_graph",
- keyvalues={
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- },
- )
- self.db_pool.simple_insert_txn(
+ keyvalues = {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ where_clause = ""
+ if thread_id is None:
+ where_clause = "thread_id IS NULL"
+ else:
+ keyvalues["thread_id"] = thread_id
+
+ self.db_pool.simple_upsert_txn(
txn,
table="receipts_graph",
+ keyvalues=keyvalues,
values={
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
"event_ids": json_encoder.encode(event_ids),
"data": json_encoder.encode(data),
},
+ where_clause=where_clause,
+ )
+
+
+class ReceiptsBackgroundUpdateStore(SQLBaseStore):
+ POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"
+ RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME = "receipts_linearized_unique_index"
+ RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME = "receipts_graph_unique_index"
+
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ self.db_pool.updates.register_background_update_handler(
+ self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING,
+ self._populate_receipt_event_stream_ordering,
+ )
+ self.db_pool.updates.register_background_update_handler(
+ self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
+ self._background_receipts_linearized_unique_index,
+ )
+ self.db_pool.updates.register_background_update_handler(
+ self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
+ self._background_receipts_graph_unique_index,
+ )
+
+ async def _populate_receipt_event_stream_ordering(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ def _populate_receipt_event_stream_ordering_txn(
+ txn: LoggingTransaction,
+ ) -> bool:
+
+ if "max_stream_id" in progress:
+ max_stream_id = progress["max_stream_id"]
+ else:
+ txn.execute("SELECT max(stream_id) FROM receipts_linearized")
+ res = txn.fetchone()
+ if res is None or res[0] is None:
+ return True
+ else:
+ max_stream_id = res[0]
+
+ start = progress.get("stream_id", 0)
+ stop = start + batch_size
+
+ sql = """
+ UPDATE receipts_linearized
+ SET event_stream_ordering = (
+ SELECT stream_ordering
+ FROM events
+ WHERE event_id = receipts_linearized.event_id
+ )
+ WHERE stream_id >= ? AND stream_id < ?
+ """
+ txn.execute(sql, (start, stop))
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING,
+ {
+ "stream_id": stop,
+ "max_stream_id": max_stream_id,
+ },
+ )
+
+ return stop > max_stream_id
+
+ finished = await self.db_pool.runInteraction(
+ "_remove_devices_from_device_inbox_txn",
+ _populate_receipt_event_stream_ordering_txn,
+ )
+
+ if finished:
+ await self.db_pool.updates._end_background_update(
+ self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING
+ )
+
+ return batch_size
+
+ async def _create_receipts_index(self, index_name: str, table: str) -> None:
+ """Adds a unique index on `(room_id, receipt_type, user_id)` to the given
+ receipts table, for non-thread receipts."""
+
+ def _create_index(conn: LoggingDatabaseConnection) -> None:
+ conn.rollback()
+
+ # we have to set autocommit, because postgres refuses to
+ # CREATE INDEX CONCURRENTLY without it.
+ if isinstance(self.database_engine, PostgresEngine):
+ conn.set_session(autocommit=True)
+
+ try:
+ c = conn.cursor()
+
+ # Now that the duplicates are gone, we can create the index.
+ concurrently = (
+ "CONCURRENTLY"
+ if isinstance(self.database_engine, PostgresEngine)
+ else ""
+ )
+ sql = f"""
+ CREATE UNIQUE INDEX {concurrently} {index_name}
+ ON {table}(room_id, receipt_type, user_id)
+ WHERE thread_id IS NULL
+ """
+ c.execute(sql)
+ finally:
+ if isinstance(self.database_engine, PostgresEngine):
+ conn.set_session(autocommit=False)
+
+ await self.db_pool.runWithConnection(_create_index)
+
+ async def _background_receipts_linearized_unique_index(
+ self, progress: dict, batch_size: int
+ ) -> int:
+ """Removes duplicate receipts and adds a unique index on
+ `(room_id, receipt_type, user_id)` to `receipts_linearized`, for non-thread
+ receipts."""
+
+ def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
+ # Identify any duplicate receipts arising from
+ # https://github.com/matrix-org/synapse/issues/14406.
+ # We expect the following query to use the per-thread receipt index and take
+ # less than a minute.
+ sql = """
+ SELECT MAX(stream_id), room_id, receipt_type, user_id
+ FROM receipts_linearized
+ WHERE thread_id IS NULL
+ GROUP BY room_id, receipt_type, user_id
+ HAVING COUNT(*) > 1
+ """
+ txn.execute(sql)
+ duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn))
+
+ # Then remove duplicate receipts, keeping the one with the highest
+ # `stream_id`. There should only be a single receipt with any given
+ # `stream_id`.
+ for max_stream_id, room_id, receipt_type, user_id in duplicate_keys:
+ sql = """
+ DELETE FROM receipts_linearized
+ WHERE
+ room_id = ? AND
+ receipt_type = ? AND
+ user_id = ? AND
+ thread_id IS NULL AND
+ stream_id < ?
+ """
+ txn.execute(sql, (room_id, receipt_type, user_id, max_stream_id))
+
+ await self.db_pool.runInteraction(
+ self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME,
+ _remove_duplicate_receipts_txn,
+ )
+
+ await self._create_receipts_index(
+ "receipts_linearized_unique_index",
+ "receipts_linearized",
+ )
+
+ await self.db_pool.updates._end_background_update(
+ self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME
+ )
+
+ return 1
+
+ async def _background_receipts_graph_unique_index(
+ self, progress: dict, batch_size: int
+ ) -> int:
+ """Removes duplicate receipts and adds a unique index on
+ `(room_id, receipt_type, user_id)` to `receipts_graph`, for non-thread
+ receipts."""
+
+ def _remove_duplicate_receipts_txn(txn: LoggingTransaction) -> None:
+ # Identify any duplicate receipts arising from
+ # https://github.com/matrix-org/synapse/issues/14406.
+ # We expect the following query to use the per-thread receipt index and take
+ # less than a minute.
+ sql = """
+ SELECT room_id, receipt_type, user_id FROM receipts_graph
+ WHERE thread_id IS NULL
+ GROUP BY room_id, receipt_type, user_id
+ HAVING COUNT(*) > 1
+ """
+ txn.execute(sql)
+ duplicate_keys = cast(List[Tuple[str, str, str]], list(txn))
+
+ # Then remove all duplicate receipts.
+ # We could be clever and try to keep the latest receipt out of every set of
+ # duplicates, but it's far simpler to remove them all.
+ for room_id, receipt_type, user_id in duplicate_keys:
+ sql = """
+ DELETE FROM receipts_graph
+ WHERE
+ room_id = ? AND
+ receipt_type = ? AND
+ user_id = ? AND
+ thread_id IS NULL
+ """
+ txn.execute(sql, (room_id, receipt_type, user_id))
+
+ await self.db_pool.runInteraction(
+ self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME,
+ _remove_duplicate_receipts_txn,
+ )
+
+ await self._create_receipts_index(
+ "receipts_graph_unique_index",
+ "receipts_graph",
)
+ await self.db_pool.updates._end_background_update(
+ self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME
+ )
+
+ return 1
+
-class ReceiptsStore(ReceiptsWorkerStore):
+class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore):
pass
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index cb63cd9b7d..31f0f2bd3d 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -21,7 +21,13 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
import attr
from synapse.api.constants import UserTypes
-from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
+from synapse.api.errors import (
+ Codes,
+ NotFoundError,
+ StoreError,
+ SynapseError,
+ ThreepidValidationError,
+)
from synapse.config.homeserver import HomeServerConfig
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.database import (
@@ -50,6 +56,14 @@ class ExternalIDReuseException(Exception):
because this external id is given to another user."""
+class LoginTokenExpired(Exception):
+ """Exception if the login token sent expired"""
+
+
+class LoginTokenReused(Exception):
+ """Exception if the login token sent was already used"""
+
+
@attr.s(frozen=True, slots=True, auto_attribs=True)
class TokenLookupResult:
"""Result of looking up an access token.
@@ -69,9 +83,9 @@ class TokenLookupResult:
"""
user_id: str
+ token_id: int
is_guest: bool = False
shadow_banned: bool = False
- token_id: Optional[int] = None
device_id: Optional[str] = None
valid_until_ms: Optional[int] = None
token_owner: str = attr.ib()
@@ -115,6 +129,20 @@ class RefreshTokenLookupResult:
If None, the session can be refreshed indefinitely."""
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class LoginTokenLookupResult:
+ """Result of looking up a login token."""
+
+ user_id: str
+ """The user this token belongs to."""
+
+ auth_provider_id: Optional[str]
+ """The SSO Identity Provider that the user authenticated with, to get this token."""
+
+ auth_provider_session_id: Optional[str]
+ """The session ID advertised by the SSO Identity Provider."""
+
+
class RegistrationWorkerStore(CacheInvalidationWorkerStore):
def __init__(
self,
@@ -166,26 +194,49 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
@cached()
async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]:
"""Deprecated: use get_userinfo_by_id instead"""
- return await self.db_pool.simple_select_one(
- table="users",
- keyvalues={"name": user_id},
- retcols=[
- "name",
- "password_hash",
- "is_guest",
- "admin",
- "consent_version",
- "consent_server_notice_sent",
- "appservice_id",
- "creation_ts",
- "user_type",
- "deactivated",
- "shadow_banned",
- ],
- allow_none=True,
+
+ def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]:
+ # We could technically use simple_select_one here, but it would not perform
+ # the COALESCEs (unless hacked into the column names), which could yield
+ # confusing results.
+ txn.execute(
+ """
+ SELECT
+ name, password_hash, is_guest, admin, consent_version, consent_ts,
+ consent_server_notice_sent, appservice_id, creation_ts, user_type,
+ deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned,
+ COALESCE(approved, TRUE) AS approved
+ FROM users
+ WHERE name = ?
+ """,
+ (user_id,),
+ )
+
+ rows = self.db_pool.cursor_to_dict(txn)
+
+ if len(rows) == 0:
+ return None
+
+ return rows[0]
+
+ row = await self.db_pool.runInteraction(
desc="get_user_by_id",
+ func=get_user_by_id_txn,
)
+ if row is not None:
+ # If we're using SQLite, our boolean values will be integers. Because we
+ # present some of this data as is to e.g. server admins via REST APIs, we
+ # want to make sure we're returning the right type of data.
+ # Note: when adding a column name to this list, be wary of NULLable columns,
+ # since NULL values will be turned into False.
+ boolean_columns = ["admin", "deactivated", "shadow_banned", "approved"]
+ for column in boolean_columns:
+ if not isinstance(row[column], bool):
+ row[column] = bool(row[column])
+
+ return row
+
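The boolean coercion matters because SQLite has no boolean type and hands back integers for columns such as `admin` and `deactivated`. A quick standalone demonstration with sqlite3 and a pared-down table:

```python
# Demonstrates that SQLite returns integers for boolean-ish columns, which is
# why `get_user_by_id` coerces them before returning.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT, admin BOOLEAN, deactivated BOOLEAN)")
conn.execute("INSERT INTO users VALUES ('@alice:test', 1, 0)")

row = dict(
    zip(("name", "admin", "deactivated"), conn.execute("SELECT * FROM users").fetchone())
)
print(row)  # {'name': '@alice:test', 'admin': 1, 'deactivated': 0}

for column in ("admin", "deactivated"):
    if not isinstance(row[column], bool):
        row[column] = bool(row[column])

print(row)  # {'name': '@alice:test', 'admin': True, 'deactivated': False}
```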
async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]:
"""Get a UserInfo object for a user by user ID.
@@ -902,7 +953,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"""Returns user id from threepid
Args:
- txn (cursor):
+ txn:
medium: threepid medium e.g. email
address: threepid address e.g. me@example.com
@@ -1232,8 +1283,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"""Sets an expiration date to the account with the given user ID.
Args:
- user_id (str): User ID to set an expiration date for.
- use_delta (bool): If set to False, the expiration date for the user will be
+ user_id: User ID to set an expiration date for.
+ use_delta: If set to False, the expiration date for the user will be
now + validity period. If set to True, this expiration date will be a
random value in the [now + period - d ; now + period] range, d being a
delta equal to 10% of the validity period.
@@ -1766,6 +1817,130 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
"replace_refresh_token", _replace_refresh_token_txn
)
+ async def add_login_token_to_user(
+ self,
+ user_id: str,
+ token: str,
+ expiry_ts: int,
+ auth_provider_id: Optional[str],
+ auth_provider_session_id: Optional[str],
+ ) -> None:
+ """Adds a short-term login token for the given user.
+
+ Args:
+ user_id: The user ID.
+ token: The new login token to add.
+ expiry_ts: Time, in milliseconds since the epoch, after which the login
+ token cannot be used.
+ auth_provider_id: The SSO Identity Provider that the user authenticated with
+ to get this token, if any
+ auth_provider_session_id: The session ID advertised by the SSO Identity
+ Provider, if any.
+ """
+ await self.db_pool.simple_insert(
+ "login_tokens",
+ {
+ "token": token,
+ "user_id": user_id,
+ "expiry_ts": expiry_ts,
+ "auth_provider_id": auth_provider_id,
+ "auth_provider_session_id": auth_provider_session_id,
+ },
+ desc="add_login_token_to_user",
+ )
+
+ def _consume_login_token(
+ self,
+ txn: LoggingTransaction,
+ token: str,
+ ts: int,
+ ) -> LoginTokenLookupResult:
+ values = self.db_pool.simple_select_one_txn(
+ txn,
+ "login_tokens",
+ keyvalues={"token": token},
+ retcols=(
+ "user_id",
+ "expiry_ts",
+ "used_ts",
+ "auth_provider_id",
+ "auth_provider_session_id",
+ ),
+ allow_none=True,
+ )
+
+ if values is None:
+ raise NotFoundError()
+
+ self.db_pool.simple_update_one_txn(
+ txn,
+ "login_tokens",
+ keyvalues={"token": token},
+ updatevalues={"used_ts": ts},
+ )
+ user_id = values["user_id"]
+ expiry_ts = values["expiry_ts"]
+ used_ts = values["used_ts"]
+ auth_provider_id = values["auth_provider_id"]
+ auth_provider_session_id = values["auth_provider_session_id"]
+
+ # Token was already used
+ if used_ts is not None:
+ raise LoginTokenReused()
+
+ # Token expired
+ if ts > int(expiry_ts):
+ raise LoginTokenExpired()
+
+ return LoginTokenLookupResult(
+ user_id=user_id,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ async def consume_login_token(self, token: str) -> LoginTokenLookupResult:
+ """Lookup a login token and consume it.
+
+ Args:
+ token: The login token.
+
+ Returns:
+ The data stored with that token, including the `user_id`.
+
+ Raises:
+ NotFoundError if the login token was not found in the database
+ LoginTokenExpired if the login token expired
+ LoginTokenReused if the login token was already used
+ """
+ return await self.db_pool.runInteraction(
+ "consume_login_token",
+ self._consume_login_token,
+ token,
+ self._clock.time_msec(),
+ )
+
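Callers of `consume_login_token` have to distinguish the three failure modes it can raise. A hedged sketch of how a caller might map them onto errors (the handler shown is illustrative, not the actual login flow):

```python
# Illustrative caller of `consume_login_token`, mapping each failure mode to
# a distinct error. The error messages here are placeholders.
from synapse.api.errors import NotFoundError
from synapse.storage.databases.main.registration import (
    LoginTokenExpired,
    LoginTokenReused,
)


async def login_with_token(store, token: str):
    try:
        res = await store.consume_login_token(token)
    except LoginTokenReused:
        raise RuntimeError("Login token has already been used")
    except LoginTokenExpired:
        raise RuntimeError("Login token has expired")
    except NotFoundError:
        raise RuntimeError("Unknown login token")
    return res.user_id, res.auth_provider_id, res.auth_provider_session_id
```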
+ async def invalidate_login_tokens_by_session_id(
+ self, auth_provider_id: str, auth_provider_session_id: str
+ ) -> None:
+ """Invalidate login tokens with the given IdP session ID.
+
+ Args:
+ auth_provider_id: The SSO Identity Provider that the user authenticated with
+ to get this token
+ auth_provider_session_id: The session ID advertised by the SSO Identity
+ Provider
+ """
+ await self.db_pool.simple_update(
+ table="login_tokens",
+ keyvalues={
+ "auth_provider_id": auth_provider_id,
+ "auth_provider_session_id": auth_provider_session_id,
+ },
+ updatevalues={"used_ts": self._clock.time_msec()},
+ desc="invalidate_login_tokens_by_session_id",
+ )
+
@cached()
async def is_guest(self, user_id: str) -> bool:
res = await self.db_pool.simple_select_one_onecol(
@@ -1778,6 +1953,40 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
return res if res else False
+ @cached()
+ async def is_user_approved(self, user_id: str) -> bool:
+ """Checks if a user is approved and therefore can be allowed to log in.
+
+ If the user's 'approved' column is NULL, we consider it to be true, since it
+ means the user was registered when support for an approval flow was either
+ disabled or nonexistent.
+
+ Args:
+ user_id: the user to check the approval status of.
+
+ Returns:
+ A boolean that is True if the user is approved, False otherwise.
+ """
+
+ def is_user_approved_txn(txn: LoggingTransaction) -> bool:
+ txn.execute(
+ """
+ SELECT COALESCE(approved, TRUE) AS approved FROM users WHERE name = ?
+ """,
+ (user_id,),
+ )
+
+ rows = self.db_pool.cursor_to_dict(txn)
+
+ # We cast to bool because the value returned by the database engine might
+ # be an integer if we're using SQLite.
+ return bool(rows[0]["approved"])
+
+ return await self.db_pool.runInteraction(
+ desc="is_user_pending_approval",
+ func=is_user_approved_txn,
+ )
+
class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
def __init__(
@@ -1915,6 +2124,29 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
txn.call_after(self.is_guest.invalidate, (user_id,))
+ def update_user_approval_status_txn(
+ self, txn: LoggingTransaction, user_id: str, approved: bool
+ ) -> None:
+ """Set the user's 'approved' flag to the given value.
+
+ The boolean is turned into an int because the column is a smallint.
+
+ Args:
+ txn: the current database transaction.
+ user_id: the user to update the flag for.
+ approved: the value to set the flag to.
+ """
+ self.db_pool.simple_update_one_txn(
+ txn=txn,
+ table="users",
+ keyvalues={"name": user_id},
+ updatevalues={"approved": approved},
+ )
+
+ # Invalidate the caches of methods that read the value of the 'approved' flag.
+ self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
+ self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,))
+
class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
def __init__(
@@ -1932,6 +2164,19 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")
+ # If support for MSC3866 is enabled and configured to require approval for new
+ accounts, we will create new users with an 'approved' flag set to false.
+ self._require_approval = (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ )
+
+ # Create a background job for removing expired login tokens
+ if hs.config.worker.run_background_tasks:
+ self._clock.looping_call(
+ self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS
+ )
+
async def add_access_token_to_user(
self,
user_id: str,
@@ -2064,6 +2309,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
admin: bool = False,
user_type: Optional[str] = None,
shadow_banned: bool = False,
+ approved: bool = False,
) -> None:
"""Attempts to register an account.
@@ -2082,6 +2328,8 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
or None for a normal user.
shadow_banned: Whether the user is shadow-banned, i.e. they may be
told their requests succeeded but we ignore them.
+ approved: Whether to consider the user as already approved by an
+ administrator.
Raises:
StoreError if the user_id could not be registered.
@@ -2098,6 +2346,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
admin,
user_type,
shadow_banned,
+ approved,
)
def _register_user(
@@ -2112,11 +2361,14 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
admin: bool,
user_type: Optional[str],
shadow_banned: bool,
+ approved: bool,
) -> None:
user_id_obj = UserID.from_string(user_id)
now = int(self._clock.time())
+ user_approved = approved or not self._require_approval
+
try:
if was_guest:
# Ensure that the guest user actually exists
@@ -2142,6 +2394,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
"admin": 1 if admin else 0,
"user_type": user_type,
"shadow_banned": shadow_banned,
+ "approved": user_approved,
},
)
else:
@@ -2157,6 +2410,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
"admin": 1 if admin else 0,
"user_type": user_type,
"shadow_banned": shadow_banned,
+ "approved": user_approved,
},
)
@@ -2227,7 +2481,10 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
txn,
table="users",
keyvalues={"name": user_id},
- updatevalues={"consent_version": consent_version},
+ updatevalues={
+ "consent_version": consent_version,
+ "consent_ts": self._clock.time_msec(),
+ },
)
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
@@ -2499,6 +2756,42 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
start_or_continue_validation_session_txn,
)
+ async def update_user_approval_status(
+ self, user_id: UserID, approved: bool
+ ) -> None:
+ """Set the user's 'approved' flag to the given value.
+
+ The boolean will be turned into an int (in update_user_approval_status_txn)
+ because the column is a smallint.
+
+ Args:
+ user_id: the user to update the flag for.
+ approved: the value to set the flag to.
+ """
+ await self.db_pool.runInteraction(
+ "update_user_approval_status",
+ self.update_user_approval_status_txn,
+ user_id.to_string(),
+ approved,
+ )
+
+ @wrap_as_background_process("delete_expired_login_tokens")
+ async def _delete_expired_login_tokens(self) -> None:
+ """Remove login tokens with expiry dates that have passed."""
+
+ def _delete_expired_login_tokens_txn(txn: LoggingTransaction, ts: int) -> None:
+ sql = "DELETE FROM login_tokens WHERE expiry_ts <= ?"
+ txn.execute(sql, (ts,))
+
+ # We keep the expired tokens for an extra 5 minutes so we can measure how many
+ # times a token is being used after its expiry
+ now = self._clock.time_msec()
+ await self.db_pool.runInteraction(
+ "delete_expired_login_tokens",
+ _delete_expired_login_tokens_txn,
+ now - (5 * 60 * 1000),
+ )
+
def find_max_generated_user_id_localpart(cur: Cursor) -> int:
"""
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 7bd27790eb..aea96e9d24 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -14,11 +14,13 @@
import logging
from typing import (
+ TYPE_CHECKING,
Collection,
Dict,
FrozenSet,
Iterable,
List,
+ Mapping,
Optional,
Set,
Tuple,
@@ -28,19 +30,48 @@ from typing import (
import attr
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import MAIN_TIMELINE, RelationTypes
+from synapse.api.errors import SynapseError
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+ make_in_list_sql_clause,
+)
from synapse.storage.databases.main.stream import generate_pagination_where_clause
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken
from synapse.util.caches.descriptors import cached, cachedList
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@attr.s(slots=True, frozen=True, auto_attribs=True)
+class ThreadsNextBatch:
+ topological_ordering: int
+ stream_ordering: int
+
+ def __str__(self) -> str:
+ return f"{self.topological_ordering}_{self.stream_ordering}"
+
+ @classmethod
+ def from_string(cls, string: str) -> "ThreadsNextBatch":
+ """
+ Creates a ThreadsNextBatch from its textual representation.
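+ For example, "5_1234" parses to topological_ordering=5 and
+ stream_ordering=1234, matching the format produced by __str__.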
+ """
+ try:
+ keys = (int(s) for s in string.split("_"))
+ return cls(*keys)
+ except Exception:
+ raise SynapseError(400, "Invalid threads token")
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
class _RelatedEvent:
"""
Contains enough information about a related event in order to properly filter
@@ -54,6 +85,76 @@ class _RelatedEvent:
class RelationsWorkerStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ self.db_pool.updates.register_background_update_handler(
+ "threads_backfill", self._backfill_threads
+ )
+
+ async def _backfill_threads(self, progress: JsonDict, batch_size: int) -> int:
+ """Backfill the threads table."""
+
+ def threads_backfill_txn(txn: LoggingTransaction) -> int:
+ last_thread_id = progress.get("last_thread_id", "")
+
+ # Get the latest event in each thread by topo ordering / stream ordering.
+ #
+ # Note that the MAX(event_id) is needed to abide by the rules of group by,
+ # but doesn't actually do anything since there should only be a single event
+ # ID per topo/stream ordering pair.
+ sql = f"""
+ SELECT room_id, relates_to_id, MAX(topological_ordering), MAX(stream_ordering), MAX(event_id)
+ FROM event_relations
+ INNER JOIN events USING (event_id)
+ WHERE
+ relates_to_id > ? AND
+ relation_type = '{RelationTypes.THREAD}'
+ GROUP BY room_id, relates_to_id
+ ORDER BY relates_to_id
+ LIMIT ?
+ """
+ txn.execute(sql, (last_thread_id, batch_size))
+
+ # No more rows to process.
+ rows = txn.fetchall()
+ if not rows:
+ return 0
+
+ # Insert the rows into the threads table. If a matching thread already exists,
+ # assume it is from a newer event.
+ sql = """
+ INSERT INTO threads (room_id, thread_id, topological_ordering, stream_ordering, latest_event_id)
+ VALUES %s
+ ON CONFLICT (room_id, thread_id)
+ DO NOTHING
+ """
+ if isinstance(txn.database_engine, PostgresEngine):
+ txn.execute_values(sql % ("?",), rows, fetch=False)
+ else:
+ txn.execute_batch(sql % ("(?, ?, ?, ?, ?)",), rows)
+
+ # Mark the progress.
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "threads_backfill", {"last_thread_id": rows[-1][1]}
+ )
+
+ return txn.rowcount
+
+ result = await self.db_pool.runInteraction(
+ "threads_backfill", threads_backfill_txn
+ )
+
+ if not result:
+ await self.db_pool.updates._end_background_update("threads_backfill")
+
+ return result
+
@cached(uncached_args=("event",), tree=True)
async def get_relations_for_event(
self,
@@ -91,6 +192,9 @@ class RelationsWorkerStore(SQLBaseStore):
# it. The `event_id` must match the `event.event_id`.
assert event.event_id == event_id
+ # Ensure bad limits aren't being passed in.
+ assert limit >= 0
+
where_clause = ["relates_to_id = ?", "room_id = ?"]
where_args: List[Union[str, int]] = [event.event_id, room_id]
is_redacted = event.internal_metadata.is_redacted()
@@ -139,21 +243,40 @@ class RelationsWorkerStore(SQLBaseStore):
) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
txn.execute(sql, where_args + [limit + 1])
- last_topo_id = None
- last_stream_id = None
events = []
- for row in txn:
+ topo_orderings: List[int] = []
+ stream_orderings: List[int] = []
+ for event_id, relation_type, sender, topo_ordering, stream_ordering in cast(
+ List[Tuple[str, str, str, int, int]], txn
+ ):
# Do not include edits for redacted events as they leak event
# content.
- if not is_redacted or row[1] != RelationTypes.REPLACE:
- events.append(_RelatedEvent(row[0], row[2]))
- last_topo_id = row[3]
- last_stream_id = row[4]
+ if not is_redacted or relation_type != RelationTypes.REPLACE:
+ events.append(_RelatedEvent(event_id, sender))
+ topo_orderings.append(topo_ordering)
+ stream_orderings.append(stream_ordering)
- # If there are more events, generate the next pagination key.
+ # If there are more events, generate the next pagination key from the
+ # last event returned.
next_token = None
- if len(events) > limit and last_topo_id and last_stream_id:
- next_key = RoomStreamToken(last_topo_id, last_stream_id)
+ if len(events) > limit:
+ # Instead of using the last row (which tells us there is more
+ # data), use the last row to be returned.
+ events = events[:limit]
+ topo_orderings = topo_orderings[:limit]
+ stream_orderings = stream_orderings[:limit]
+
+ topo = topo_orderings[-1]
+ token = stream_orderings[-1]
+ if direction == "b":
+ # Tokens are positions between events.
+ # This token points *after* the last event in the chunk.
+ # We need it to point to the event before it in the chunk
+ # when we are going backwards so we subtract one from the
+ # stream part.
+ token -= 1
+ next_key = RoomStreamToken(topo, token)
+
if from_token:
next_token = from_token.copy_and_replace(
StreamKeyType.ROOM, next_key
@@ -177,6 +300,42 @@ class RelationsWorkerStore(SQLBaseStore):
"get_recent_references_for_event", _get_recent_references_for_event_txn
)
+ async def get_all_relations_for_event_with_types(
+ self,
+ event_id: str,
+ relation_types: List[str],
+ ) -> List[str]:
+ """Get the event IDs of all events that have a relation to the given event with
+ one of the given relation types.
+
+ Args:
+ event_id: The event for which to look for related events.
+ relation_types: The types of relations to look for.
+
+ Returns:
+ A list of the IDs of the events that relate to the given event with one of
+ the given relation types.
+ """
+
+ def get_all_relation_ids_for_event_with_types_txn(
+ txn: LoggingTransaction,
+ ) -> List[str]:
+ rows = self.db_pool.simple_select_many_txn(
+ txn=txn,
+ table="event_relations",
+ column="relation_type",
+ iterable=relation_types,
+ keyvalues={"relates_to_id": event_id},
+ retcols=["event_id"],
+ )
+
+ return [row["event_id"] for row in rows]
+
+ return await self.db_pool.runInteraction(
+ desc="get_all_relation_ids_for_event_with_types",
+ func=get_all_relation_ids_for_event_with_types_txn,
+ )
+
async def event_includes_relation(self, event_id: str) -> bool:
"""Check if the given event relates to another event.
@@ -240,112 +399,196 @@ class RelationsWorkerStore(SQLBaseStore):
)
return result is not None
- @cached(tree=True)
- async def get_aggregation_groups_for_event(
- self, event_id: str, room_id: str, limit: int = 5
- ) -> List[JsonDict]:
- """Get a list of annotations on the event, grouped by event type and
+ @cached()
+ async def get_aggregation_groups_for_event(self, event_id: str) -> List[JsonDict]:
+ raise NotImplementedError()
+
+ @cachedList(
+ cached_method_name="get_aggregation_groups_for_event", list_name="event_ids"
+ )
+ async def get_aggregation_groups_for_events(
+ self, event_ids: Collection[str]
+ ) -> Mapping[str, Optional[List[JsonDict]]]:
+ """Get a list of annotations on the given events, grouped by event type and
aggregation key, sorted by count.
This is used e.g. to get what reactions have happened on an event and how
many.
Args:
- event_id: Fetch events that relate to this event ID.
- room_id: The room the event belongs to.
- limit: Only fetch the `limit` groups.
+ event_ids: Fetch events that relate to these event IDs.
Returns:
- List of groups of annotations that match. Each row is a dict with
- `type`, `key` and `count` fields.
+ A map of event IDs to a list of groups of annotations that match.
+ Each entry is a dict with `type`, `key` and `count` fields.
"""
+ # The number of entries to return per event ID.
+ limit = 5
- args = [
- event_id,
- room_id,
- RelationTypes.ANNOTATION,
- limit,
- ]
+ clause, args = make_in_list_sql_clause(
+ self.database_engine, "relates_to_id", event_ids
+ )
+ args.append(RelationTypes.ANNOTATION)
- sql = """
- SELECT type, aggregation_key, COUNT(DISTINCT sender)
- FROM event_relations
- INNER JOIN events USING (event_id)
- WHERE relates_to_id = ? AND room_id = ? AND relation_type = ?
- GROUP BY relation_type, type, aggregation_key
- ORDER BY COUNT(*) DESC
- LIMIT ?
+ sql = f"""
+ SELECT
+ relates_to_id,
+ annotation.type,
+ aggregation_key,
+ COUNT(DISTINCT annotation.sender)
+ FROM events AS annotation
+ INNER JOIN event_relations USING (event_id)
+ INNER JOIN events AS parent ON
+ parent.event_id = relates_to_id
+ AND parent.room_id = annotation.room_id
+ WHERE
+ {clause}
+ AND relation_type = ?
+ GROUP BY relates_to_id, annotation.type, aggregation_key
+ ORDER BY relates_to_id, COUNT(*) DESC
"""
- def _get_aggregation_groups_for_event_txn(
+ def _get_aggregation_groups_for_events_txn(
txn: LoggingTransaction,
- ) -> List[JsonDict]:
+ ) -> Mapping[str, List[JsonDict]]:
txn.execute(sql, args)
- return [{"type": row[0], "key": row[1], "count": row[2]} for row in txn]
+ result: Dict[str, List[JsonDict]] = {}
+ for event_id, type, key, count in cast(
+ List[Tuple[str, str, str, int]], txn
+ ):
+ event_results = result.setdefault(event_id, [])
+
+ # Limit the number of results per event ID.
+ if len(event_results) == limit:
+ continue
+
+ event_results.append({"type": type, "key": key, "count": count})
+
+ return result
return await self.db_pool.runInteraction(
- "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
+ "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn
)
async def get_aggregation_groups_for_users(
- self,
- event_id: str,
- room_id: str,
- limit: int,
- users: FrozenSet[str] = frozenset(),
- ) -> Dict[Tuple[str, str], int]:
+ self, event_ids: Collection[str], users: FrozenSet[str]
+ ) -> Dict[str, Dict[Tuple[str, str], int]]:
"""Fetch the partial aggregations for an event for specific users.
This is used, in conjunction with get_aggregation_groups_for_event, to
remove information from the results for ignored users.
Args:
- event_id: Fetch events that relate to this event ID.
- room_id: The room the event belongs to.
- limit: Only fetch the `limit` groups.
+ event_ids: Fetch events that relate to these event IDs.
users: The users to fetch information for.
Returns:
- A map of (event type, aggregation key) to a count of users.
+ A map of event ID to a map of (event type, aggregation key) to a
+ count of users.
"""
if not users:
return {}
- args: List[Union[str, int]] = [
- event_id,
- room_id,
- RelationTypes.ANNOTATION,
- ]
+ events_sql, args = make_in_list_sql_clause(
+ self.database_engine, "relates_to_id", event_ids
+ )
users_sql, users_args = make_in_list_sql_clause(
- self.database_engine, "sender", users
+ self.database_engine, "annotation.sender", users
)
args.extend(users_args)
+ args.append(RelationTypes.ANNOTATION)
sql = f"""
- SELECT type, aggregation_key, COUNT(DISTINCT sender)
- FROM event_relations
- INNER JOIN events USING (event_id)
- WHERE relates_to_id = ? AND room_id = ? AND relation_type = ? AND {users_sql}
- GROUP BY relation_type, type, aggregation_key
- ORDER BY COUNT(*) DESC
- LIMIT ?
+ SELECT
+ relates_to_id,
+ annotation.type,
+ aggregation_key,
+ COUNT(DISTINCT annotation.sender)
+ FROM events AS annotation
+ INNER JOIN event_relations USING (event_id)
+ INNER JOIN events AS parent ON
+ parent.event_id = relates_to_id
+ AND parent.room_id = annotation.room_id
+ WHERE {events_sql} AND {users_sql} AND relation_type = ?
+ GROUP BY relates_to_id, annotation.type, aggregation_key
+ ORDER BY relates_to_id, COUNT(*) DESC
"""
def _get_aggregation_groups_for_users_txn(
txn: LoggingTransaction,
- ) -> Dict[Tuple[str, str], int]:
- txn.execute(sql, args + [limit])
+ ) -> Dict[str, Dict[Tuple[str, str], int]]:
+ txn.execute(sql, args)
- return {(row[0], row[1]): row[2] for row in txn}
+ result: Dict[str, Dict[Tuple[str, str], int]] = {}
+ for event_id, type, key, count in cast(
+ List[Tuple[str, str, str, int]], txn
+ ):
+ result.setdefault(event_id, {})[(type, key)] = count
+
+ return result
return await self.db_pool.runInteraction(
"get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn
)
@cached()
+ async def get_references_for_event(self, event_id: str) -> List[JsonDict]:
+ raise NotImplementedError()
+
+ @cachedList(cached_method_name="get_references_for_event", list_name="event_ids")
+ async def get_references_for_events(
+ self, event_ids: Collection[str]
+ ) -> Mapping[str, Optional[List[_RelatedEvent]]]:
+ """Get a list of references to the given events.
+
+ Args:
+ event_ids: Fetch events that relate to these event IDs.
+
+ Returns:
+ A map of event IDs to a list of related event IDs (and their senders).
+ """
+
+ clause, args = make_in_list_sql_clause(
+ self.database_engine, "relates_to_id", event_ids
+ )
+ args.append(RelationTypes.REFERENCE)
+
+ sql = f"""
+ SELECT relates_to_id, ref.event_id, ref.sender
+ FROM events AS ref
+ INNER JOIN event_relations USING (event_id)
+ INNER JOIN events AS parent ON
+ parent.event_id = relates_to_id
+ AND parent.room_id = ref.room_id
+ WHERE
+ {clause}
+ AND relation_type = ?
+ ORDER BY ref.topological_ordering, ref.stream_ordering
+ """
+
+ def _get_references_for_events_txn(
+ txn: LoggingTransaction,
+ ) -> Mapping[str, List[_RelatedEvent]]:
+ txn.execute(sql, args)
+
+ result: Dict[str, List[_RelatedEvent]] = {}
+ for relates_to_id, event_id, sender in cast(
+ List[Tuple[str, str, str]], txn
+ ):
+ result.setdefault(relates_to_id, []).append(
+ _RelatedEvent(event_id, sender)
+ )
+
+ return result
+
+ return await self.db_pool.runInteraction(
+ "_get_references_for_events_txn", _get_references_for_events_txn
+ )
+
+ @cached()
def get_applicable_edit(self, event_id: str) -> Optional[EventBase]:
raise NotImplementedError()
@@ -761,57 +1004,192 @@ class RelationsWorkerStore(SQLBaseStore):
"get_if_user_has_annotated_event", _get_if_user_has_annotated_event
)
- @cached(iterable=True)
- async def get_mutual_event_relations_for_rel_type(
- self, event_id: str, relation_type: str
- ) -> Set[Tuple[str, str]]:
- raise NotImplementedError()
+ @cached(tree=True)
+ async def get_threads(
+ self,
+ room_id: str,
+ limit: int = 5,
+ from_token: Optional[ThreadsNextBatch] = None,
+ ) -> Tuple[List[str], Optional[ThreadsNextBatch]]:
+ """Get a list of thread IDs, ordered by topological ordering of their
+ latest reply.
- @cachedList(
- cached_method_name="get_mutual_event_relations_for_rel_type",
- list_name="relation_types",
- )
- async def get_mutual_event_relations(
- self, event_id: str, relation_types: Collection[str]
- ) -> Dict[str, Set[Tuple[str, str]]]:
+ Args:
+ room_id: The room the event belongs to.
+ limit: Only fetch the most recent `limit` threads.
+ from_token: Fetch rows from a previous next_batch, or from the start if None.
+
+ Returns:
+ A tuple of:
+ A list of thread root event IDs.
+
+ The next_batch, if one exists.
+ """
+ # Generate the pagination clause, if necessary.
+ #
+ # Find any threads where the latest reply's topological ordering is at or
+ # before the last returned thread's, and its stream ordering is earlier.
+ pagination_clause = ""
+ pagination_args: tuple = ()
+ if from_token:
+ pagination_clause = "AND topological_ordering <= ? AND stream_ordering < ?"
+ pagination_args = (
+ from_token.topological_ordering,
+ from_token.stream_ordering,
+ )
+
+ sql = f"""
+ SELECT thread_id, topological_ordering, stream_ordering
+ FROM threads
+ WHERE
+ room_id = ?
+ {pagination_clause}
+ ORDER BY topological_ordering DESC, stream_ordering DESC
+ LIMIT ?
+ """
+
+ def _get_threads_txn(
+ txn: LoggingTransaction,
+ ) -> Tuple[List[str], Optional[ThreadsNextBatch]]:
+ txn.execute(sql, (room_id, *pagination_args, limit + 1))
+
+ rows = cast(List[Tuple[str, int, int]], txn.fetchall())
+ thread_ids = [r[0] for r in rows]
+
+ # If there are more events, generate the next pagination key from the
+ # last thread which will be returned.
+ next_token = None
+ if len(thread_ids) > limit:
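+ # rows[-1] is the extra row fetched only to detect that more data
+ # exists; rows[-2] is the last thread actually returned.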
+ last_topo_id = rows[-2][1]
+ last_stream_id = rows[-2][2]
+ next_token = ThreadsNextBatch(last_topo_id, last_stream_id)
+
+ return thread_ids[:limit], next_token
+
+ return await self.db_pool.runInteraction("get_threads", _get_threads_txn)
+
+ @cached()
+ async def get_thread_id(self, event_id: str) -> str:
"""
- Fetch event metadata for events which related to the same event as the given event.
+ Get the thread ID for an event. This considers multi-level relations,
+ e.g. an annotation to an event which is part of a thread.
- If the given event has no relation information, returns an empty dictionary.
+ It only searches up the relations tree, i.e. it only searches for events
+ which the given event is related to (and which those events are related
+ to, etc.)
+
+ Given the following DAG:
+
+ A <---[m.thread]-- B <--[m.annotation]-- C
+ ^
+ |--[m.reference]-- D <--[m.annotation]-- E
+
+ get_thread_id(B) and get_thread_id(C) both return A, i.e. events B and C
+ are considered part of thread A.
+
+ See also get_thread_id_for_receipts.
Args:
- event_id: The event ID which is targeted by relations.
- relation_types: The relation types to check for mutual relations.
+ event_id: The event ID to fetch the thread ID for.
Returns:
- A dictionary of relation type to:
- A set of tuples of:
- The sender
- The event type
+ The event ID of the root event in the thread, if this event is part
+ of a thread. "main", otherwise.
"""
- rel_type_sql, rel_type_args = make_in_list_sql_clause(
- self.database_engine, "relation_type", relation_types
- )
- sql = f"""
- SELECT DISTINCT relation_type, sender, type FROM event_relations
- INNER JOIN events USING (event_id)
- WHERE relates_to_id = ? AND {rel_type_sql}
+ # Recurse event relations up to the *root* event, then search that chain
+ # of relations for a thread relation. If one is found, the root event is
+ # returned.
+ #
+ # Note that this should only ever find 0 or 1 entries since it is invalid
+ # for an event to have a thread relation to an event which also has a
+ # relation.
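+ #
+ # The recursion depth is capped at 3, which presumably bounds the cost of
+ # the query while still covering realistic chains (e.g. an annotation of
+ # an event in a thread).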
+ sql = """
+ WITH RECURSIVE related_events AS (
+ SELECT event_id, relates_to_id, relation_type, 0 depth
+ FROM event_relations
+ WHERE event_id = ?
+ UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1
+ FROM event_relations e
+ INNER JOIN related_events r ON r.relates_to_id = e.event_id
+ WHERE depth <= 3
+ )
+ SELECT relates_to_id FROM related_events
+ WHERE relation_type = 'm.thread'
+ ORDER BY depth DESC
+ LIMIT 1;
"""
- def _get_event_relations(
- txn: LoggingTransaction,
- ) -> Dict[str, Set[Tuple[str, str]]]:
- txn.execute(sql, [event_id] + rel_type_args)
- result: Dict[str, Set[Tuple[str, str]]] = {
- rel_type: set() for rel_type in relation_types
- }
- for rel_type, sender, type in txn.fetchall():
- result[rel_type].add((sender, type))
- return result
+ def _get_thread_id(txn: LoggingTransaction) -> str:
+ txn.execute(sql, (event_id,))
+ row = txn.fetchone()
+ if row:
+ return row[0]
+
+ # If no thread was found, it is part of the main timeline.
+ return MAIN_TIMELINE
+
+ return await self.db_pool.runInteraction("get_thread_id", _get_thread_id)
+
+ @cached()
+ async def get_thread_id_for_receipts(self, event_id: str) -> str:
+ """
+ Get the thread ID for an event by traversing to the top-most related event
+ and confirming any children events form a thread.
+
+ Given the following DAG:
+
+ A <---[m.thread]-- B <--[m.annotation]-- C
+ ^
+ |--[m.reference]-- D <--[m.annotation]-- E
+
+ get_thread_id_for_receipts(X) returns A for any X in {A, B, C, D, E}, i.e.
+ all of those events are considered part of thread A.
+
+ See also get_thread_id.
+
+ Args:
+ event_id: The event ID to fetch the thread ID for.
+
+ Returns:
+ The event ID of the root event in the thread, if this event is part
+ of a thread. "main", otherwise.
+ """
+
+ # Recurse event relations up to the *root* event, then search for any events
+ # related to that root node for a thread relation. If one is found, the
+ # root event is returned.
+ #
+ # Note that there cannot be thread relations in the middle of the chain since
+ # it is invalid for an event to have a thread relation to an event which also
+ # has a relation.
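+ #
+ # The COALESCE falls back to the event's own ID when it has no relations
+ # at all, so an event that is itself a thread root is still matched by the
+ # outer query.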
+ sql = """
+ SELECT relates_to_id FROM event_relations WHERE relates_to_id = COALESCE((
+ WITH RECURSIVE related_events AS (
+ SELECT event_id, relates_to_id, relation_type, 0 depth
+ FROM event_relations
+ WHERE event_id = ?
+ UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1
+ FROM event_relations e
+ INNER JOIN related_events r ON r.relates_to_id = e.event_id
+ WHERE depth <= 3
+ )
+ SELECT relates_to_id FROM related_events
+ ORDER BY depth DESC
+ LIMIT 1
+ ), ?) AND relation_type = 'm.thread' LIMIT 1;
+ """
+
+ def _get_related_thread_id(txn: LoggingTransaction) -> str:
+ txn.execute(sql, (event_id, event_id))
+ row = txn.fetchone()
+ if row:
+ return row[0]
+
+ # If no thread was found, it is part of the main timeline.
+ return MAIN_TIMELINE
return await self.db_pool.runInteraction(
- "get_event_relations", _get_event_relations
+ "get_related_thread_id", _get_related_thread_id
)
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 0f1f0d11ea..52ad947c6c 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -25,6 +25,7 @@ from typing import (
List,
Mapping,
Optional,
+ Sequence,
Tuple,
Union,
cast,
@@ -96,6 +97,12 @@ class RoomSortOrder(Enum):
STATE_EVENTS = "state_events"
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class PartialStateResyncInfo:
+ joined_via: Optional[str]
+ servers_in_room: List[str] = attr.ib(factory=list)
+
+
class RoomWorkerStore(CacheInvalidationWorkerStore):
def __init__(
self,
@@ -206,21 +213,30 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
def _construct_room_type_where_clause(
self, room_types: Union[List[Union[str, None]], None]
- ) -> Tuple[Union[str, None], List[str]]:
+ ) -> Tuple[Union[str, None], list]:
if not room_types:
return None, []
- else:
- # We use None when we want get rooms without a type
- is_null_clause = ""
- if None in room_types:
- is_null_clause = "OR room_type IS NULL"
- room_types = [value for value in room_types if value is not None]
+ # Since None is used to represent a room without a type, care needs to
+ # be taken when constructing the where clause.
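+ # For example, room_types=[None, "m.space"] produces a clause of the form
+ # "(room_type IS NULL OR room_type IN (?))" with args ["m.space"] (the
+ # exact IN/ANY form depends on the database engine).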
+ clauses = []
+ args: list = []
+
+ room_types_set = set(room_types)
+
+ # We use None to represent a room without a type.
+ if None in room_types_set:
+ clauses.append("room_type IS NULL")
+ room_types_set.remove(None)
+
+ # If there are other room types, generate the proper clause.
+ if room_types_set:
list_clause, args = make_in_list_sql_clause(
- self.database_engine, "room_type", room_types
+ self.database_engine, "room_type", room_types_set
)
+ clauses.append(list_clause)
- return f"({list_clause} {is_null_clause})", args
+ return f"({' OR '.join(clauses)})", args
async def count_public_rooms(
self,
@@ -240,14 +256,6 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
def _count_public_rooms_txn(txn: LoggingTransaction) -> int:
query_args = []
- room_type_clause, args = self._construct_room_type_where_clause(
- search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
- if search_filter
- else None
- )
- room_type_clause = f" AND {room_type_clause}" if room_type_clause else ""
- query_args += args
-
if network_tuple:
if network_tuple.appservice_id:
published_sql = """
@@ -267,6 +275,14 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
UNION SELECT room_id from appservice_room_list
"""
+ room_type_clause, args = self._construct_room_type_where_clause(
+ search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
+ if search_filter
+ else None
+ )
+ room_type_clause = f" AND {room_type_clause}" if room_type_clause else ""
+ query_args += args
+
sql = f"""
SELECT
COUNT(*)
@@ -641,8 +657,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
"version": room[5],
"creator": room[6],
"encryption": room[7],
- "federatable": room[8],
- "public": room[9],
+ # room_stats_state.federatable is an integer on sqlite.
+ "federatable": bool(room[8]),
+ # rooms.is_public is an integer on sqlite.
+ "public": bool(room[9]),
"join_rules": room[10],
"guest_access": room[11],
"history_visibility": room[12],
@@ -894,7 +912,11 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
event_json = db_to_json(content_json)
content = event_json["content"]
content_url = content.get("url")
- thumbnail_url = content.get("info", {}).get("thumbnail_url")
+ info = content.get("info")
+ if isinstance(info, dict):
+ thumbnail_url = info.get("thumbnail_url")
+ else:
+ thumbnail_url = None
for url in (content_url, thumbnail_url):
if not url:
@@ -1131,17 +1153,46 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
get_rooms_for_retention_period_in_range_txn,
)
- async def get_partial_state_rooms_and_servers(
+ @cached(iterable=True)
+ async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]:
+ """Gets the list of servers in a partial state room at the time we joined it.
+
+ Returns:
+ The `servers_in_room` list from the `/send_join` response for partial state
+ rooms. May not be accurate or complete, as it comes from a remote
+ homeserver.
+ An empty list for full state rooms.
+ """
+ return await self.db_pool.simple_select_onecol(
+ "partial_state_rooms_servers",
+ keyvalues={"room_id": room_id},
+ retcol="server_name",
+ desc="get_partial_state_servers_at_join",
+ )
+
+ async def get_partial_state_room_resync_info(
self,
- ) -> Mapping[str, Collection[str]]:
- """Get all rooms containing events with partial state, and the servers known
- to be in the room.
+ ) -> Mapping[str, PartialStateResyncInfo]:
+ """Get all rooms containing events with partial state, and the information
+ needed to restart a "resync" of those rooms.
Returns:
A dictionary of rooms with partial state, with room IDs as keys and
the information needed to restart a resync of each room as values.
"""
- room_servers: Dict[str, List[str]] = {}
+ room_servers: Dict[str, PartialStateResyncInfo] = {}
+
+ rows = await self.db_pool.simple_select_list(
+ table="partial_state_rooms",
+ keyvalues={},
+ retcols=("room_id", "joined_via"),
+ desc="get_server_which_served_partial_join",
+ )
+
+ for row in rows:
+ room_id = row["room_id"]
+ joined_via = row["joined_via"]
+ room_servers[room_id] = PartialStateResyncInfo(joined_via=joined_via)
rows = await self.db_pool.simple_select_list(
"partial_state_rooms_servers",
@@ -1153,7 +1204,15 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
for row in rows:
room_id = row["room_id"]
server_name = row["server_name"]
- room_servers.setdefault(room_id, []).append(server_name)
+ entry = room_servers.get(room_id)
+ if entry is None:
+ # There is a foreign key constraint which enforces that every room_id in
+ # partial_state_rooms_servers appears in partial_state_rooms. So we
+ # expect `entry` to be non-null. (This reasoning fails if we've
+ # partial-joined between the two SELECTs, but this is unlikely to happen
+ # in practice.)
+ continue
+ entry.servers_in_room.append(server_name)
return room_servers
@@ -1183,8 +1242,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
)
return False
- @staticmethod
- def _clear_partial_state_room_txn(txn: LoggingTransaction, room_id: str) -> None:
+ def _clear_partial_state_room_txn(
+ self, txn: LoggingTransaction, room_id: str
+ ) -> None:
DatabasePool.simple_delete_txn(
txn,
table="partial_state_rooms_servers",
@@ -1195,7 +1255,32 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
table="partial_state_rooms",
keyvalues={"room_id": room_id},
)
+ self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
+ self._invalidate_cache_and_stream(
+ txn, self.get_partial_state_servers_at_join, (room_id,)
+ )
+ # We now delete anything from `device_lists_remote_pending` with a
+ # stream ID less than the minimum
+ # `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
+ device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
+ txn,
+ table="partial_state_rooms",
+ keyvalues={},
+ retcol="MIN(device_lists_stream_id)",
+ allow_none=True,
+ )
+ if device_lists_stream_id is None:
+ # There are no rooms currently being partially joined, so we delete everything.
+ txn.execute("DELETE FROM device_lists_remote_pending")
+ else:
+ sql = """
+ DELETE FROM device_lists_remote_pending
+ WHERE stream_id <= ?
+ """
+ txn.execute(sql, (device_lists_stream_id,))
+
+ @cached()
async def is_partial_state_room(self, room_id: str) -> bool:
"""Checks if this room has partial state.
@@ -1214,6 +1299,22 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return entry is not None
+ async def get_join_event_id_and_device_lists_stream_id_for_partial_state(
+ self, room_id: str
+ ) -> Tuple[str, int]:
+ """Get the event ID of the initial join that started the partial
+ join, and the device list stream ID at the point we started the partial
+ join.
+ """
+
+ result = await self.db_pool.simple_select_one(
+ table="partial_state_rooms",
+ keyvalues={"room_id": room_id},
+ retcols=("join_event_id", "device_lists_stream_id"),
+ desc="get_join_event_id_for_partial_state",
+ )
+ return result["join_event_id"], result["device_lists_stream_id"]
+
class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@@ -1755,29 +1856,51 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
self,
room_id: str,
servers: Collection[str],
+ device_lists_stream_id: int,
+ joined_via: str,
) -> None:
- """Mark the given room as containing events with partial state
+ """Mark the given room as containing events with partial state.
+
+ We also store additional data that describes _when_ we first partial-joined this
+ room, which helps us to keep other homeservers in sync when we finally fully
+ join this room.
+
+ We do not include a `join_event_id` here---we need to wait for the join event
+ to be persisted first.
Args:
room_id: the ID of the room
servers: other servers known to be in the room
+ device_lists_stream_id: the device_lists stream ID at the time when we first
+ joined the room.
+ joined_via: the server name we requested a partial join from.
"""
await self.db_pool.runInteraction(
"store_partial_state_room",
self._store_partial_state_room_txn,
room_id,
servers,
+ device_lists_stream_id,
+ joined_via,
)
- @staticmethod
def _store_partial_state_room_txn(
- txn: LoggingTransaction, room_id: str, servers: Collection[str]
+ self,
+ txn: LoggingTransaction,
+ room_id: str,
+ servers: Collection[str],
+ device_lists_stream_id: int,
+ joined_via: str,
) -> None:
DatabasePool.simple_insert_txn(
txn,
table="partial_state_rooms",
values={
"room_id": room_id,
+ "device_lists_stream_id": device_lists_stream_id,
+ # To be updated later once the join event is persisted.
+ "join_event_id": None,
+ "joined_via": joined_via,
},
)
DatabasePool.simple_insert_many_txn(
@@ -1786,6 +1909,40 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
keys=("room_id", "server_name"),
values=((room_id, s) for s in servers),
)
+ self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
+ self._invalidate_cache_and_stream(
+ txn, self.get_partial_state_servers_at_join, (room_id,)
+ )
+
+ async def write_partial_state_rooms_join_event_id(
+ self,
+ room_id: str,
+ join_event_id: str,
+ ) -> None:
+ """Record the join event which resulted from a partial join.
+
+ We do this separately to `store_partial_state_room` because we need to wait for
+ the join event to be persisted. Otherwise we violate a foreign key constraint.
+ """
+ await self.db_pool.runInteraction(
+ "write_partial_state_rooms_join_event_id",
+ self._write_partial_state_rooms_join_event_id,
+ room_id,
+ join_event_id,
+ )
+
+ def _write_partial_state_rooms_join_event_id(
+ self,
+ txn: LoggingTransaction,
+ room_id: str,
+ join_event_id: str,
+ ) -> None:
+ DatabasePool.simple_update_txn(
+ txn,
+ table="partial_state_rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"join_event_id": join_event_id},
+ )
async def maybe_store_room_on_outlier_membership(
self, room_id: str, room_version: RoomVersion
@@ -1904,7 +2061,8 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
Args:
report_id: ID of reported event in database
Returns:
- event_report: json list of information from event report
+ JSON dict of information from an event report or None if the
+ report does not exist.
"""
def _get_event_report_txn(
@@ -1977,8 +2135,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
user_id: search for user_id. Ignored if user_id is None
room_id: search for room_id. Ignored if room_id is None
Returns:
- event_reports: json list of event reports
- count: total number of event reports matching the filter criteria
+ Tuple of:
+ json list of event reports
+ total number of event reports matching the filter criteria
"""
def _get_event_reports_paginate_txn(
@@ -2001,9 +2160,15 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
+ # We join on room_stats_state despite not using any columns from it
+ # because the join can influence the number of rows returned;
+ # e.g. a room that doesn't have state, maybe because it was deleted.
+ # The query returning the total count should be consistent with
+ # the query returning the results.
sql = """
SELECT COUNT(*) as total_event_reports
FROM event_reports AS er
+ JOIN room_stats_state ON room_stats_state.room_id = er.room_id
{}
""".format(
where_clause
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index e2cccc688c..f02c1d7ea7 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -15,7 +15,6 @@
import logging
from typing import (
TYPE_CHECKING,
- Callable,
Collection,
Dict,
FrozenSet,
@@ -31,12 +30,8 @@ from typing import (
import attr
from synapse.api.constants import EventTypes, Membership
-from synapse.events import EventBase
from synapse.metrics import LaterGauge
-from synapse.metrics.background_process_metrics import (
- run_as_background_process,
- wrap_as_background_process,
-)
+from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
@@ -91,15 +86,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# at a time. Keyed by room_id.
self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
- # Is the current_state_events.membership up to date? Or is the
- # background update still running?
- self._current_state_events_membership_up_to_date = False
-
- txn = db_conn.cursor(
- txn_name="_check_safe_current_state_events_membership_updated"
- )
- self._check_safe_current_state_events_membership_updated_txn(txn)
- txn.close()
+ self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
if (
self.hs.config.worker.run_background_tasks
@@ -157,61 +144,42 @@ class RoomMemberWorkerStore(EventsWorkerStore):
self._known_servers_count = max([count, 1])
return self._known_servers_count
- def _check_safe_current_state_events_membership_updated_txn(
- self, txn: LoggingTransaction
- ) -> None:
- """Checks if it is safe to assume the new current_state_events
- membership column is up to date
- """
-
- pending_update = self.db_pool.simple_select_one_txn(
- txn,
- table="background_updates",
- keyvalues={"update_name": _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME},
- retcols=["update_name"],
- allow_none=True,
- )
-
- self._current_state_events_membership_up_to_date = not pending_update
-
- # If the update is still running, reschedule to run.
- if pending_update:
- self._clock.call_later(
- 15.0,
- run_as_background_process,
- "_check_safe_current_state_events_membership_updated",
- self.db_pool.runInteraction,
- "_check_safe_current_state_events_membership_updated",
- self._check_safe_current_state_events_membership_updated_txn,
- )
-
@cached(max_entries=100000, iterable=True)
async def get_users_in_room(self, room_id: str) -> List[str]:
- return await self.db_pool.runInteraction(
- "get_users_in_room", self.get_users_in_room_txn, room_id
+ """Returns a list of users in the room.
+
+ Will return inaccurate results for rooms with partial state, since the state for
+ the forward extremities of those rooms will exclude most members. We may also
+ calculate room state incorrectly for such rooms and believe that a member is or
+ is not in the room when the opposite is true.
+
+ Note: If you only care about users in the room local to the homeserver, use
+ `get_local_users_in_room(...)` instead which will be more performant.
+ """
+ return await self.db_pool.simple_select_onecol(
+ table="current_state_events",
+ keyvalues={
+ "type": EventTypes.Member,
+ "room_id": room_id,
+ "membership": Membership.JOIN,
+ },
+ retcol="state_key",
+ desc="get_users_in_room",
)
def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[str]:
- # If we can assume current_state_events.membership is up to date
- # then we can avoid a join, which is a Very Good Thing given how
- # frequently this function gets called.
- if self._current_state_events_membership_up_to_date:
- sql = """
- SELECT state_key FROM current_state_events
- WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
- """
- else:
- sql = """
- SELECT state_key FROM room_memberships as m
- INNER JOIN current_state_events as c
- ON m.event_id = c.event_id
- AND m.room_id = c.room_id
- AND m.user_id = c.state_key
- WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
- """
+ """Returns a list of users in the room."""
- txn.execute(sql, (room_id, Membership.JOIN))
- return [r[0] for r in txn]
+ return self.db_pool.simple_select_onecol_txn(
+ txn,
+ table="current_state_events",
+ keyvalues={
+ "type": EventTypes.Member,
+ "room_id": room_id,
+ "membership": Membership.JOIN,
+ },
+ retcol="state_key",
+ )
@cached()
def get_user_in_room_with_profile(
@@ -283,6 +251,9 @@ class RoomMemberWorkerStore(EventsWorkerStore):
Returns:
A mapping from user ID to ProfileInfo.
+
+ Preconditions:
+ - There is full state available for the room (it is not partial-stated).
"""
def _get_users_in_room_with_profiles(
@@ -322,28 +293,14 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# We do this all in one transaction to keep the cache small.
# FIXME: get rid of this when we have room_stats
- # If we can assume current_state_events.membership is up to date
- # then we can avoid a join, which is a Very Good Thing given how
- # frequently this function gets called.
- if self._current_state_events_membership_up_to_date:
- # Note, rejected events will have a null membership field, so
- # we we manually filter them out.
- sql = """
- SELECT count(*), membership FROM current_state_events
- WHERE type = 'm.room.member' AND room_id = ?
- AND membership IS NOT NULL
- GROUP BY membership
- """
- else:
- sql = """
- SELECT count(*), m.membership FROM room_memberships as m
- INNER JOIN current_state_events as c
- ON m.event_id = c.event_id
- AND m.room_id = c.room_id
- AND m.user_id = c.state_key
- WHERE c.type = 'm.room.member' AND c.room_id = ?
- GROUP BY m.membership
- """
+ # Note, rejected events will have a null membership field, so
+ # we we manually filter them out.
+ sql = """
+ SELECT count(*), membership FROM current_state_events
+ WHERE type = 'm.room.member' AND room_id = ?
+ AND membership IS NOT NULL
+ GROUP BY membership
+ """
txn.execute(sql, (room_id,))
res: Dict[str, MemberSummary] = {}
@@ -352,30 +309,18 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# we order by membership and then fairly arbitrarily by event_id so
# heroes are consistent
- if self._current_state_events_membership_up_to_date:
- # Note, rejected events will have a null membership field, so
- # we we manually filter them out.
- sql = """
- SELECT state_key, membership, event_id
- FROM current_state_events
- WHERE type = 'm.room.member' AND room_id = ?
- AND membership IS NOT NULL
- ORDER BY
- CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
- event_id ASC
- LIMIT ?
- """
- else:
- sql = """
- SELECT c.state_key, m.membership, c.event_id
- FROM room_memberships as m
- INNER JOIN current_state_events as c USING (room_id, event_id)
- WHERE c.type = 'm.room.member' AND c.room_id = ?
- ORDER BY
- CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
- c.event_id ASC
- LIMIT ?
- """
+ # Note, rejected events will have a null membership field, so
+ # we we manually filter them out.
+ sql = """
+ SELECT state_key, membership, event_id
+ FROM current_state_events
+ WHERE type = 'm.room.member' AND room_id = ?
+ AND membership IS NOT NULL
+ ORDER BY
+ CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
+ event_id ASC
+ LIMIT ?
+ """
# 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
@@ -531,6 +476,47 @@ class RoomMemberWorkerStore(EventsWorkerStore):
desc="get_local_users_in_room",
)
+ async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool:
+ """
+ Check whether a given local user is currently joined to the given room.
+
+ Returns:
+ A boolean indicating whether the user is currently joined to the room
+
+ Raises:
+ Exception when called with a user that is not local to this homeserver
+ """
+ if not self.hs.is_mine_id(user_id):
+ raise Exception(
+ "Cannot call 'check_local_user_in_room' on "
+ "non-local user %s" % (user_id,),
+ )
+
+ (
+ membership,
+ member_event_id,
+ ) = await self.get_local_current_membership_for_user_in_room(
+ user_id=user_id,
+ room_id=room_id,
+ )
+
+ return membership == Membership.JOIN
+
+ async def is_server_notice_room(self, room_id: str) -> bool:
+ """
+ Determines whether the given room is a 'Server Notices' room, used for
+ sending server notices to a user.
+
+ This is determined by seeing whether the server notices user is present
+ in the room.
+ """
+ if self._server_notices_mxid is None:
+ return False
+ is_server_notices_room = await self.check_local_user_in_room(
+ user_id=self._server_notices_mxid, room_id=room_id
+ )
+ return is_server_notices_room
+
async def get_local_current_membership_for_user_in_room(
self, user_id: str, room_id: str
) -> Tuple[Optional[str], Optional[str]]:
@@ -592,27 +578,15 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# We use `current_state_events` here and not `local_current_membership`
# as a) this gets called with remote users and b) this only gets called
# for rooms the server is participating in.
- if self._current_state_events_membership_up_to_date:
- sql = """
- SELECT room_id, e.instance_name, e.stream_ordering
- FROM current_state_events AS c
- INNER JOIN events AS e USING (room_id, event_id)
- WHERE
- c.type = 'm.room.member'
- AND c.state_key = ?
- AND c.membership = ?
- """
- else:
- sql = """
- SELECT room_id, e.instance_name, e.stream_ordering
- FROM current_state_events AS c
- INNER JOIN room_memberships AS m USING (room_id, event_id)
- INNER JOIN events AS e USING (room_id, event_id)
- WHERE
- c.type = 'm.room.member'
- AND c.state_key = ?
- AND m.membership = ?
- """
+ sql = """
+ SELECT room_id, e.instance_name, e.stream_ordering
+ FROM current_state_events AS c
+ INNER JOIN events AS e USING (room_id, event_id)
+ WHERE
+ c.type = 'm.room.member'
+ AND c.state_key = ?
+ AND c.membership = ?
+ """
txn.execute(sql, (user_id, Membership.JOIN))
return frozenset(
@@ -622,117 +596,124 @@ class RoomMemberWorkerStore(EventsWorkerStore):
for room_id, instance, stream_id in txn
)
- @cachedList(
- cached_method_name="get_rooms_for_user_with_stream_ordering",
- list_name="user_ids",
- )
- async def get_rooms_for_users_with_stream_ordering(
+ async def get_users_server_still_shares_room_with(
self, user_ids: Collection[str]
- ) -> Dict[str, FrozenSet[GetRoomsForUserWithStreamOrdering]]:
- """A batched version of `get_rooms_for_user_with_stream_ordering`.
-
- Returns:
- Map from user_id to set of rooms that is currently in.
+ ) -> Set[str]:
+ """Given a list of users return the set that the server still share a
+ room with.
"""
+
+ if not user_ids:
+ return set()
+
return await self.db_pool.runInteraction(
- "get_rooms_for_users_with_stream_ordering",
- self._get_rooms_for_users_with_stream_ordering_txn,
+ "get_users_server_still_shares_room_with",
+ self.get_users_server_still_shares_room_with_txn,
user_ids,
)
- def _get_rooms_for_users_with_stream_ordering_txn(
- self, txn: LoggingTransaction, user_ids: Collection[str]
- ) -> Dict[str, FrozenSet[GetRoomsForUserWithStreamOrdering]]:
+ def get_users_server_still_shares_room_with_txn(
+ self,
+ txn: LoggingTransaction,
+ user_ids: Collection[str],
+ ) -> Set[str]:
+ if not user_ids:
+ return set()
+
+ sql = """
+ SELECT state_key FROM current_state_events
+ WHERE
+ type = 'm.room.member'
+ AND membership = 'join'
+ AND %s
+ GROUP BY state_key
+ """
clause, args = make_in_list_sql_clause(
- self.database_engine,
- "c.state_key",
- user_ids,
+ self.database_engine, "state_key", user_ids
)
- if self._current_state_events_membership_up_to_date:
- sql = f"""
- SELECT c.state_key, room_id, e.instance_name, e.stream_ordering
- FROM current_state_events AS c
- INNER JOIN events AS e USING (room_id, event_id)
- WHERE
- c.type = 'm.room.member'
- AND c.membership = ?
- AND {clause}
- """
- else:
- sql = f"""
- SELECT c.state_key, room_id, e.instance_name, e.stream_ordering
- FROM current_state_events AS c
- INNER JOIN room_memberships AS m USING (room_id, event_id)
- INNER JOIN events AS e USING (room_id, event_id)
- WHERE
- c.type = 'm.room.member'
- AND m.membership = ?
- AND {clause}
- """
+ txn.execute(sql % (clause,), args)
- txn.execute(sql, [Membership.JOIN] + args)
+ return {row[0] for row in txn}
- result: Dict[str, Set[GetRoomsForUserWithStreamOrdering]] = {
- user_id: set() for user_id in user_ids
- }
- for user_id, room_id, instance, stream_id in txn:
- result[user_id].add(
- GetRoomsForUserWithStreamOrdering(
- room_id, PersistedEventPosition(instance, stream_id)
- )
- )
-
- return {user_id: frozenset(v) for user_id, v in result.items()}
+ @cached(max_entries=500000, iterable=True)
+ async def get_rooms_for_user(self, user_id: str) -> FrozenSet[str]:
+ """Returns a set of room_ids the user is currently joined to.
- async def get_users_server_still_shares_room_with(
- self, user_ids: Collection[str]
- ) -> Set[str]:
- """Given a list of users return the set that the server still share a
- room with.
+ For a remote user, this only returns rooms this server is currently
+ participating in.
"""
+ rooms = self.get_rooms_for_user_with_stream_ordering.cache.get_immediate(
+ (user_id,),
+ None,
+ update_metrics=False,
+ )
+ if rooms:
+ return frozenset(r.room_id for r in rooms)
- if not user_ids:
- return set()
-
- def _get_users_server_still_shares_room_with_txn(
- txn: LoggingTransaction,
- ) -> Set[str]:
- sql = """
- SELECT state_key FROM current_state_events
- WHERE
- type = 'm.room.member'
- AND membership = 'join'
- AND %s
- GROUP BY state_key
- """
+ room_ids = await self.db_pool.simple_select_onecol(
+ table="current_state_events",
+ keyvalues={
+ "type": EventTypes.Member,
+ "membership": Membership.JOIN,
+ "state_key": user_id,
+ },
+ retcol="room_id",
+ desc="get_rooms_for_user",
+ )
- clause, args = make_in_list_sql_clause(
- self.database_engine, "state_key", user_ids
- )
+ return frozenset(room_ids)
- txn.execute(sql % (clause,), args)
+ @cachedList(
+ cached_method_name="get_rooms_for_user",
+ list_name="user_ids",
+ )
+ async def _get_rooms_for_users(
+ self, user_ids: Collection[str]
+ ) -> Dict[str, FrozenSet[str]]:
+ """A batched version of `get_rooms_for_user`.
- return {row[0] for row in txn}
+ Returns:
+ Map from user_id to the set of rooms that the user is currently in.
+ """
- return await self.db_pool.runInteraction(
- "get_users_server_still_shares_room_with",
- _get_users_server_still_shares_room_with_txn,
+ rows = await self.db_pool.simple_select_many_batch(
+ table="current_state_events",
+ column="state_key",
+ iterable=user_ids,
+ retcols=(
+ "state_key",
+ "room_id",
+ ),
+ keyvalues={
+ "type": EventTypes.Member,
+ "membership": Membership.JOIN,
+ },
+ desc="get_rooms_for_users",
)
- async def get_rooms_for_user(
- self, user_id: str, on_invalidate: Optional[Callable[[], None]] = None
- ) -> FrozenSet[str]:
- """Returns a set of room_ids the user is currently joined to.
+ user_rooms: Dict[str, Set[str]] = {user_id: set() for user_id in user_ids}
- If a remote user only returns rooms this server is currently
- participating in.
+ for row in rows:
+ user_rooms[row["state_key"]].add(row["room_id"])
+
+ return {key: frozenset(rooms) for key, rooms in user_rooms.items()}
+
+ async def get_rooms_for_users(
+ self, user_ids: Collection[str]
+ ) -> Dict[str, FrozenSet[str]]:
+ """A batched wrapper around `_get_rooms_for_users`, to prevent locking
+ other calls to `get_rooms_for_user` for large user lists.
"""
- rooms = await self.get_rooms_for_user_with_stream_ordering(
- user_id, on_invalidate=on_invalidate
- )
- return frozenset(r.room_id for r in rooms)
+ all_user_rooms: Dict[str, FrozenSet[str]] = {}
+
+ # 250 users is pretty arbitrary but the data can be quite large if users
+ # are in many rooms.
+ for batch_user_ids in batch_iter(user_ids, 250):
+ all_user_rooms.update(await self._get_rooms_for_users(batch_user_ids))
+
+ return all_user_rooms
@cached(max_entries=10000)
async def does_pair_of_users_share_a_room(
@@ -764,7 +745,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
# user and the set of other users, and then checking if there is any
# overlap.
sql = f"""
- SELECT b.state_key
+ SELECT DISTINCT b.state_key
FROM (
SELECT room_id FROM current_state_events
WHERE type = 'm.room.member' AND membership = 'join' AND state_key = ?
@@ -773,7 +754,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
SELECT room_id, state_key FROM current_state_events
WHERE type = 'm.room.member' AND membership = 'join' AND {clause}
) AS b using (room_id)
- LIMIT 1
"""
txn.execute(sql, (user_id, *args))
@@ -832,144 +812,92 @@ class RoomMemberWorkerStore(EventsWorkerStore):
return shared_room_ids or frozenset()
- async def get_joined_users_from_state(
- self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry"
- ) -> Dict[str, ProfileInfo]:
- state_group: Union[object, int] = state_entry.state_group
- if not state_group:
- # If state_group is None it means it has yet to be assigned a
- # state group, i.e. we need to make sure that calls with a state_group
- # of None don't hit previous cached calls with a None state_group.
- # To do this we set the state_group to a new object as object() != object()
- state_group = object()
-
- assert state_group is not None
- with Measure(self._clock, "get_joined_users_from_state"):
- return await self._get_joined_users_from_context(
- room_id, state_group, state, context=state_entry
- )
+ async def get_joined_user_ids_from_state(
+ self, room_id: str, state: StateMap[str]
+ ) -> Set[str]:
+ """
+ For the given room state, get the set of user IDs that are joined to the room.
- @cached(num_args=2, iterable=True, max_entries=100000)
- async def _get_joined_users_from_context(
- self,
- room_id: str,
- state_group: Union[object, int],
- current_state_ids: StateMap[str],
- event: Optional[EventBase] = None,
- context: Optional["_StateCacheEntry"] = None,
- ) -> Dict[str, ProfileInfo]:
- # We don't use `state_group`, it's there so that we can cache based
- # on it. However, it's important that it's never None, since two current_states
- # with a state_group of None are likely to be different.
- assert state_group is not None
+ This method checks the local event cache, before calling
+ `_get_user_ids_from_membership_event_ids` for any uncached events.
+ """
- users_in_room = {}
- member_event_ids = [
- e_id
- for key, e_id in current_state_ids.items()
- if key[0] == EventTypes.Member
- ]
-
- if context is not None:
- # If we have a context with a delta from a previous state group,
- # check if we also have the result from the previous group in cache.
- # If we do then we can reuse that result and simply update it with
- # any membership changes in `delta_ids`
- if context.prev_group and context.delta_ids:
- prev_res = self._get_joined_users_from_context.cache.get_immediate(
- (room_id, context.prev_group), None
- )
- if prev_res and isinstance(prev_res, dict):
- users_in_room = dict(prev_res)
- member_event_ids = [
- e_id
- for key, e_id in context.delta_ids.items()
- if key[0] == EventTypes.Member
- ]
- for etype, state_key in context.delta_ids:
- if etype == EventTypes.Member:
- users_in_room.pop(state_key, None)
-
- # We check if we have any of the member event ids in the event cache
- # before we ask the DB
-
- # We don't update the event cache hit ratio as it completely throws off
- # the hit ratio counts. After all, we don't populate the cache if we
- # miss it here
- event_map = await self._get_events_from_cache(
- member_event_ids, update_metrics=False
- )
+ with Measure(self._clock, "get_joined_user_ids_from_state"):
+ users_in_room = set()
+ member_event_ids = [
+ e_id for key, e_id in state.items() if key[0] == EventTypes.Member
+ ]
- missing_member_event_ids = []
- for event_id in member_event_ids:
- ev_entry = event_map.get(event_id)
- if ev_entry and not ev_entry.event.rejected_reason:
- if ev_entry.event.membership == Membership.JOIN:
- users_in_room[ev_entry.event.state_key] = ProfileInfo(
- display_name=ev_entry.event.content.get("displayname", None),
- avatar_url=ev_entry.event.content.get("avatar_url", None),
- )
- else:
- missing_member_event_ids.append(event_id)
+ # We check if we have any of the member event ids in the event cache
+ # before we ask the DB
- if missing_member_event_ids:
- event_to_memberships = await self._get_joined_profiles_from_event_ids(
- missing_member_event_ids
+ # We don't update the event cache hit ratio as it completely throws off
+ # the hit ratio counts. After all, we don't populate the cache if we
+ # miss it here
+ event_map = self._get_events_from_local_cache(
+ member_event_ids, update_metrics=False
)
- users_in_room.update(row for row in event_to_memberships.values() if row)
-
- if event is not None and event.type == EventTypes.Member:
- if event.membership == Membership.JOIN:
- if event.event_id in member_event_ids:
- users_in_room[event.state_key] = ProfileInfo(
- display_name=event.content.get("displayname", None),
- avatar_url=event.content.get("avatar_url", None),
+
+ missing_member_event_ids = []
+ for event_id in member_event_ids:
+ ev_entry = event_map.get(event_id)
+ if ev_entry and not ev_entry.event.rejected_reason:
+ if ev_entry.event.membership == Membership.JOIN:
+ users_in_room.add(ev_entry.event.state_key)
+ else:
+ missing_member_event_ids.append(event_id)
+
+ if missing_member_event_ids:
+ event_to_memberships = (
+ await self._get_user_ids_from_membership_event_ids(
+ missing_member_event_ids
)
+ )
+ users_in_room.update(
+ user_id for user_id in event_to_memberships.values() if user_id
+ )
- return users_in_room
+ return users_in_room
- @cached(max_entries=10000)
- def _get_joined_profile_from_event_id(
+ @cached(
+ max_entries=10000,
+ # This name matches the old function that has been replaced - the cache name
+ # is kept here to maintain backwards compatibility.
+ name="_get_joined_profile_from_event_id",
+ )
+ def _get_user_id_from_membership_event_id(
self, event_id: str
) -> Optional[Tuple[str, ProfileInfo]]:
raise NotImplementedError()
@cachedList(
- cached_method_name="_get_joined_profile_from_event_id",
+ cached_method_name="_get_user_id_from_membership_event_id",
list_name="event_ids",
)
- async def _get_joined_profiles_from_event_ids(
+ async def _get_user_ids_from_membership_event_ids(
self, event_ids: Iterable[str]
- ) -> Dict[str, Optional[Tuple[str, ProfileInfo]]]:
+ ) -> Dict[str, Optional[str]]:
"""For given set of member event_ids check if they point to a join
- event and if so return the associated user and profile info.
+ event.
Args:
event_ids: The member event IDs to lookup
Returns:
- Map from event ID to `user_id` and ProfileInfo (or None if not join event).
+ Map from event ID to `user_id`, or None if event is not a join.
"""
rows = await self.db_pool.simple_select_many_batch(
table="room_memberships",
column="event_id",
iterable=event_ids,
- retcols=("user_id", "display_name", "avatar_url", "event_id"),
+ retcols=("user_id", "event_id"),
keyvalues={"membership": Membership.JOIN},
batch_size=1000,
- desc="_get_joined_profiles_from_event_ids",
+ desc="_get_user_ids_from_membership_event_ids",
)
- return {
- row["event_id"]: (
- row["user_id"],
- ProfileInfo(
- avatar_url=row["avatar_url"], display_name=row["display_name"]
- ),
- )
- for row in rows
- }
+ return {row["event_id"]: row["user_id"] for row in rows}
@cached(max_entries=10000)
async def is_host_joined(self, room_id: str, host: str) -> bool:
@@ -1051,6 +979,72 @@ class RoomMemberWorkerStore(EventsWorkerStore):
"get_current_hosts_in_room", get_current_hosts_in_room_txn
)
+ @cached(iterable=True, max_entries=10000)
+ async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]:
+ """
+ Get current hosts in room based on current state.
+
+ The heuristic of sorting by servers who have been in the room the
+ longest is good because they're most likely to have anything we ask
+ about.
+
+ For SQLite the returned list is not ordered, as SQLite doesn't support
+ the appropriate SQL.
+
+ Uses `m.room.member`s in the room state at the current forward
+ extremities to determine which hosts are in the room.
+
+ Will return inaccurate results for rooms with partial state, since the
+ state for the forward extremities of those rooms will exclude most
+ members. We may also calculate room state incorrectly for such rooms and
+ believe that a host is or is not in the room when the opposite is true.
+
+ Returns:
+ Returns a list of servers sorted by how long they have been in the room,
+ longest first (i.e. sorted by the lowest join depth first).
+ """
+
+ if isinstance(self.database_engine, Sqlite3Engine):
+ # If we're using SQLite then let's just always use
+ # `get_current_hosts_in_room` rather than funky SQL.
+
+ domains = await self.get_current_hosts_in_room(room_id)
+ return list(domains)
+
+ # For PostgreSQL we can use a regex to pull out the domains of the
+ # joined users from `current_state_events` directly.
+
+ def get_current_hosts_in_room_ordered_txn(txn: LoggingTransaction) -> List[str]:
+ # Returns a list of servers currently joined in the room sorted by
+ # longest in the room first (aka. with the lowest depth). The
+ # heuristic of sorting by servers who have been in the room the
+ # longest is good because they're most likely to have anything we
+ # ask about.
+ sql = """
+ SELECT
+ /* Match the domain part of the MXID */
+ substring(c.state_key FROM '@[^:]*:(.*)$') as server_domain
+ FROM current_state_events c
+ /* Get the depth of the event from the events table */
+ INNER JOIN events AS e USING (event_id)
+ WHERE
+ /* Find any join state events in the room */
+ c.type = 'm.room.member'
+ AND c.membership = 'join'
+ AND c.room_id = ?
+ /* Group all state events from the same domain into their own buckets (groups) */
+ GROUP BY server_domain
+ /* Sorted by lowest depth first */
+ ORDER BY min(e.depth) ASC;
+ """
+ txn.execute(sql, (room_id,))
+ # `server_domain` will be `NULL` for malformed MXIDs with no colons.
+ return [d for d, in txn if d is not None]
+
+ return await self.db_pool.runInteraction(
+ "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn
+ )
+
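For illustration only, a Python equivalent of the domain-extraction regex used in the SQL above (the sample MXIDs are made up):

    import re

    # Mirrors substring(c.state_key FROM '@[^:]*:(.*)$') in the query above.
    mxid_domain = re.compile(r"^@[^:]*:(.*)$")

    for state_key in ("@alice:example.org", "@bob:matrix.org", "malformed-no-colon"):
        m = mxid_domain.match(state_key)
        # Malformed MXIDs produce no match, mirroring the NULL rows filtered out above.
        print(state_key, "->", m.group(1) if m else None)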
async def get_joined_hosts(
self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry"
) -> FrozenSet[str]:
@@ -1128,12 +1122,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
else:
# The cache doesn't match the state group or prev state group,
# so we calculate the result from first principles.
- joined_users = await self.get_joined_users_from_state(
- room_id, state, state_entry
+ joined_user_ids = await self.get_joined_user_ids_from_state(
+ room_id, state
)
cache.hosts_to_joined_users = {}
- for user_id in joined_users:
+ for user_id in joined_user_ids:
host = intern_string(get_domain_from_id(user_id))
cache.hosts_to_joined_users.setdefault(host, set()).add(user_id)
@@ -1212,6 +1206,30 @@ class RoomMemberWorkerStore(EventsWorkerStore):
"get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
)
+ async def is_locally_forgotten_room(self, room_id: str) -> bool:
+ """Returns whether all local users have forgotten this room_id.
+
+ Args:
+ room_id: The room ID to query.
+
+ Returns:
+ Whether the room is forgotten.
+ """
+
+ sql = """
+ SELECT count(*) > 0 FROM local_current_membership
+ INNER JOIN room_memberships USING (room_id, event_id)
+ WHERE
+ room_id = ?
+ AND forgotten = 0;
+ """
+
+ rows = await self.db_pool.execute("is_forgotten_room", None, sql, room_id)
+
+ # `count(*)` always returns an integer.
+ # If any rows still exist, it means someone has not forgotten this room yet.
+ return not rows[0][0]
+
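A small sketch of why `not rows[0][0]` is the right test: the query yields a single cell containing `count(*) > 0`, which is truthy while at least one local member has not forgotten the room. The row shapes below are illustrative:

    def is_locally_forgotten(rows):
        # rows mimics what `db_pool.execute` returns: one row, one cell.
        return not rows[0][0]

    print(is_locally_forgotten([(1,)]))  # someone still remembers -> False
    print(is_locally_forgotten([(0,)]))  # every local user has forgotten -> True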
async def get_rooms_user_has_been_in(self, user_id: str) -> Set[str]:
"""Get all rooms that the user has ever been in.
@@ -1499,6 +1517,36 @@ class RoomMemberStore(
await self.db_pool.runInteraction("forget_membership", f)
+def extract_heroes_from_room_summary(
+ details: Mapping[str, MemberSummary], me: str
+) -> List[str]:
+ """Determine the users that represent a room, from the perspective of the `me` user.
+
+ The rules which say which users we select are specified in the "Room Summary"
+ section of
+ https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv3sync
+
+ Returns a list (possibly empty) of heroes' mxids.
+ """
+ empty_ms = MemberSummary([], 0)
+
+ joined_user_ids = [
+ r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
+ ]
+ invited_user_ids = [
+ r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
+ ]
+ gone_user_ids = [
+ r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
+ ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
+
+ # FIXME: order by stream ordering rather than as returned by SQL
+ if joined_user_ids or invited_user_ids:
+ return sorted(joined_user_ids + invited_user_ids)[0:5]
+ else:
+ return sorted(gone_user_ids)[0:5]
+
+
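A condensed restatement of the hero-selection rule with made-up MXIDs (not a call into the function above, just the same logic spelled out):

    me = "@me:example.org"
    joined = ["@bob:example.org"]      # joined members other than `me`
    invited = ["@carol:example.org"]   # invited members other than `me`
    gone = ["@dave:example.org"]       # left or banned members other than `me`

    # Prefer joined+invited users; fall back to departed ones; cap at five heroes.
    heroes = sorted(joined + invited)[:5] if (joined or invited) else sorted(gone)[:5]
    print(heroes)  # ['@bob:example.org', '@carol:example.org']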
@attr.s(slots=True, auto_attribs=True)
class _JoinedHostsCache:
"""The cached data used by the `_get_joined_hosts_cache`."""
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index f6e24b68d2..3fe433f66c 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -11,10 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import enum
import logging
import re
-from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Set, Tuple
+from collections import deque
+from dataclasses import dataclass
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Collection,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
import attr
@@ -27,7 +39,7 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
if TYPE_CHECKING:
@@ -68,11 +80,11 @@ class SearchWorkerStore(SQLBaseStore):
if not self.hs.config.server.enable_search:
return
if isinstance(self.database_engine, PostgresEngine):
- sql = (
- "INSERT INTO event_search"
- " (event_id, room_id, key, vector, stream_ordering, origin_server_ts)"
- " VALUES (?,?,?,to_tsvector('english', ?),?,?)"
- )
+ sql = """
+ INSERT INTO event_search
+ (event_id, room_id, key, vector, stream_ordering, origin_server_ts)
+ VALUES (?,?,?,to_tsvector('english', ?),?,?)
+ """
args1 = (
(
@@ -89,20 +101,20 @@ class SearchWorkerStore(SQLBaseStore):
txn.execute_batch(sql, args1)
elif isinstance(self.database_engine, Sqlite3Engine):
- sql = (
- "INSERT INTO event_search (event_id, room_id, key, value)"
- " VALUES (?,?,?,?)"
- )
- args2 = (
- (
- entry.event_id,
- entry.room_id,
- entry.key,
- _clean_value_for_search(entry.value),
- )
- for entry in entries
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="event_search",
+ keys=("event_id", "room_id", "key", "value"),
+ values=(
+ (
+ entry.event_id,
+ entry.room_id,
+ entry.key,
+ _clean_value_for_search(entry.value),
+ )
+ for entry in entries
+ ),
)
- txn.execute_batch(sql, args2)
else:
# This should be unreachable.
@@ -150,15 +162,17 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
TYPES = ["m.room.name", "m.room.message", "m.room.topic"]
def reindex_search_txn(txn: LoggingTransaction) -> int:
- sql = (
- "SELECT stream_ordering, event_id, room_id, type, json, "
- " origin_server_ts FROM events"
- " JOIN event_json USING (room_id, event_id)"
- " WHERE ? <= stream_ordering AND stream_ordering < ?"
- " AND (%s)"
- " ORDER BY stream_ordering DESC"
- " LIMIT ?"
- ) % (" OR ".join("type = '%s'" % (t,) for t in TYPES),)
+ sql = """
+ SELECT stream_ordering, event_id, room_id, type, json, origin_server_ts
+ FROM events
+ JOIN event_json USING (room_id, event_id)
+ WHERE ? <= stream_ordering AND stream_ordering < ?
+ AND (%s)
+ ORDER BY stream_ordering DESC
+ LIMIT ?
+ """ % (
+ " OR ".join("type = '%s'" % (t,) for t in TYPES),
+ )
txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
@@ -272,8 +286,10 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
try:
c.execute(
- "CREATE INDEX CONCURRENTLY event_search_fts_idx"
- " ON event_search USING GIN (vector)"
+ """
+ CREATE INDEX CONCURRENTLY event_search_fts_idx
+ ON event_search USING GIN (vector)
+ """
)
except psycopg2.ProgrammingError as e:
logger.warning(
@@ -311,12 +327,16 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
# We create with NULLS FIRST so that when we search *backwards*
# we get the ones with non null origin_server_ts *first*
c.execute(
- "CREATE INDEX CONCURRENTLY event_search_room_order ON event_search("
- "room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
+ """
+ CREATE INDEX CONCURRENTLY event_search_room_order
+ ON event_search(room_id, origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)
+ """
)
c.execute(
- "CREATE INDEX CONCURRENTLY event_search_order ON event_search("
- "origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)"
+ """
+ CREATE INDEX CONCURRENTLY event_search_order
+ ON event_search(origin_server_ts NULLS FIRST, stream_ordering NULLS FIRST)
+ """
)
conn.set_session(autocommit=False)
@@ -333,14 +353,14 @@ class SearchBackgroundUpdateStore(SearchWorkerStore):
)
def reindex_search_txn(txn: LoggingTransaction) -> Tuple[int, bool]:
- sql = (
- "UPDATE event_search AS es SET stream_ordering = e.stream_ordering,"
- " origin_server_ts = e.origin_server_ts"
- " FROM events AS e"
- " WHERE e.event_id = es.event_id"
- " AND ? <= e.stream_ordering AND e.stream_ordering < ?"
- " RETURNING es.stream_ordering"
- )
+ sql = """
+ UPDATE event_search AS es
+ SET stream_ordering = e.stream_ordering, origin_server_ts = e.origin_server_ts
+ FROM events AS e
+ WHERE e.event_id = es.event_id
+ AND ? <= e.stream_ordering AND e.stream_ordering < ?
+ RETURNING es.stream_ordering
+ """
min_stream_id = max_stream_id - batch_size
txn.execute(sql, (min_stream_id, max_stream_id))
@@ -421,8 +441,6 @@ class SearchStore(SearchBackgroundUpdateStore):
"""
clauses = []
- search_query = _parse_query(self.database_engine, search_term)
-
args: List[Any] = []
# Make sure we don't explode because the person is in too many rooms.
@@ -444,32 +462,35 @@ class SearchStore(SearchBackgroundUpdateStore):
count_clauses = clauses
if isinstance(self.database_engine, PostgresEngine):
- sql = (
- "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) AS rank,"
- " room_id, event_id"
- " FROM event_search"
- " WHERE vector @@ to_tsquery('english', ?)"
- )
+ search_query = search_term
+ sql = """
+ SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) AS rank,
+ room_id, event_id
+ FROM event_search
+ WHERE vector @@ websearch_to_tsquery('english', ?)
+ """
args = [search_query, search_query] + args
- count_sql = (
- "SELECT room_id, count(*) as count FROM event_search"
- " WHERE vector @@ to_tsquery('english', ?)"
- )
+ count_sql = """
+ SELECT room_id, count(*) as count FROM event_search
+ WHERE vector @@ websearch_to_tsquery('english', ?)
+ """
count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
- sql = (
- "SELECT rank(matchinfo(event_search)) as rank, room_id, event_id"
- " FROM event_search"
- " WHERE value MATCH ?"
- )
+ search_query = _parse_query_for_sqlite(search_term)
+
+ sql = """
+ SELECT rank(matchinfo(event_search)) as rank, room_id, event_id
+ FROM event_search
+ WHERE value MATCH ?
+ """
args = [search_query] + args
- count_sql = (
- "SELECT room_id, count(*) as count FROM event_search"
- " WHERE value MATCH ?"
- )
- count_args = [search_term] + count_args
+ count_sql = """
+ SELECT room_id, count(*) as count FROM event_search
+ WHERE value MATCH ?
+ """
+ count_args = [search_query] + count_args
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
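Background on the `to_tsquery` -> `websearch_to_tsquery` switch: `websearch_to_tsquery` (available from PostgreSQL 11, matching the minimum version bumped elsewhere in this change) accepts free-form user input with quoted phrases, `or` and a leading `-`, and never raises a syntax error, which is why the raw `search_term` can be passed straight through on the PostgreSQL path. A hedged psycopg2 sketch, with a placeholder DSN and an illustrative expected output:

    import psycopg2

    conn = psycopg2.connect("dbname=synapse")  # placeholder DSN
    with conn.cursor() as cur:
        cur.execute(
            "SELECT websearch_to_tsquery('english', %s)", ('"fat rat" -cat or dog',)
        )
        # Roughly: 'fat' <-> 'rat' & !'cat' | 'dog'
        print(cur.fetchone()[0])
        # to_tsquery would reject the same string with a tsquery syntax error.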
@@ -510,7 +531,6 @@ class SearchStore(SearchBackgroundUpdateStore):
)
count = sum(row["count"] for row in count_results if row["room_id"] in room_ids)
-
return {
"results": [
{"event": event_map[r["event_id"]], "rank": r["rank"]}
@@ -542,9 +562,6 @@ class SearchStore(SearchBackgroundUpdateStore):
Each match as a dictionary.
"""
clauses = []
-
- search_query = _parse_query(self.database_engine, search_term)
-
args: List[Any] = []
# Make sure we don't explode because the person is in too many rooms.
@@ -576,26 +593,29 @@ class SearchStore(SearchBackgroundUpdateStore):
raise SynapseError(400, "Invalid pagination token")
clauses.append(
- "(origin_server_ts < ?"
- " OR (origin_server_ts = ? AND stream_ordering < ?))"
+ """
+ (origin_server_ts < ? OR (origin_server_ts = ? AND stream_ordering < ?))
+ """
)
args.extend([origin_server_ts, origin_server_ts, stream])
if isinstance(self.database_engine, PostgresEngine):
- sql = (
- "SELECT ts_rank_cd(vector, to_tsquery('english', ?)) as rank,"
- " origin_server_ts, stream_ordering, room_id, event_id"
- " FROM event_search"
- " WHERE vector @@ to_tsquery('english', ?) AND "
- )
+ search_query = search_term
+ sql = """
+ SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) as rank,
+ origin_server_ts, stream_ordering, room_id, event_id
+ FROM event_search
+ WHERE vector @@ websearch_to_tsquery('english', ?) AND
+ """
args = [search_query, search_query] + args
- count_sql = (
- "SELECT room_id, count(*) as count FROM event_search"
- " WHERE vector @@ to_tsquery('english', ?) AND "
- )
+ count_sql = """
+ SELECT room_id, count(*) as count FROM event_search
+ WHERE vector @@ websearch_to_tsquery('english', ?) AND
+ """
count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
+
# We use CROSS JOIN here to ensure we use the right indexes.
# https://sqlite.org/optoverview.html#crossjoin
#
@@ -604,23 +624,25 @@ class SearchStore(SearchBackgroundUpdateStore):
# in the events table to get the topological ordering. We need
# to use the indexes in this order because sqlite refuses to
# MATCH unless it uses the full text search index
- sql = (
- "SELECT rank(matchinfo) as rank, room_id, event_id,"
- " origin_server_ts, stream_ordering"
- " FROM (SELECT key, event_id, matchinfo(event_search) as matchinfo"
- " FROM event_search"
- " WHERE value MATCH ?"
- " )"
- " CROSS JOIN events USING (event_id)"
- " WHERE "
+ sql = """
+ SELECT
+ rank(matchinfo) as rank, room_id, event_id, origin_server_ts, stream_ordering
+ FROM (
+ SELECT key, event_id, matchinfo(event_search) as matchinfo
+ FROM event_search
+ WHERE value MATCH ?
)
+ CROSS JOIN events USING (event_id)
+ WHERE
+ """
+ search_query = _parse_query_for_sqlite(search_term)
args = [search_query] + args
- count_sql = (
- "SELECT room_id, count(*) as count FROM event_search"
- " WHERE value MATCH ? AND "
- )
- count_args = [search_term] + count_args
+ count_sql = """
+ SELECT room_id, count(*) as count FROM event_search
+ WHERE value MATCH ? AND
+ """
+ count_args = [search_query] + count_args
else:
# This should be unreachable.
raise Exception("Unrecognized database engine")
@@ -631,17 +653,17 @@ class SearchStore(SearchBackgroundUpdateStore):
# We add an arbitrary limit here to ensure we don't try to pull the
# entire table from the database.
if isinstance(self.database_engine, PostgresEngine):
- sql += (
- " ORDER BY origin_server_ts DESC NULLS LAST,"
- " stream_ordering DESC NULLS LAST LIMIT ?"
- )
+ sql += """
+ ORDER BY origin_server_ts DESC NULLS LAST, stream_ordering DESC NULLS LAST
+ LIMIT ?
+ """
elif isinstance(self.database_engine, Sqlite3Engine):
sql += " ORDER BY origin_server_ts DESC, stream_ordering DESC LIMIT ?"
else:
raise Exception("Unrecognized database engine")
# mypy expects to append only a `str`, not an `int`
- args.append(limit) # type: ignore[arg-type]
+ args.append(limit)
results = await self.db_pool.execute(
"search_rooms", self.db_pool.cursor_to_dict, sql, *args
@@ -729,13 +751,16 @@ class SearchStore(SearchBackgroundUpdateStore):
while stop_sel in value:
stop_sel += ">"
- query = "SELECT ts_headline(?, to_tsquery('english', ?), %s)" % (
- _to_postgres_options(
- {
- "StartSel": start_sel,
- "StopSel": stop_sel,
- "MaxFragments": "50",
- }
+ query = (
+ "SELECT ts_headline(?, websearch_to_tsquery('english', ?), %s)"
+ % (
+ _to_postgres_options(
+ {
+ "StartSel": start_sel,
+ "StopSel": stop_sel,
+ "MaxFragments": "50",
+ }
+ )
)
)
txn.execute(query, (value, search_query))
@@ -760,20 +785,127 @@ def _to_postgres_options(options_dict: JsonDict) -> str:
return "'%s'" % (",".join("%s=%s" % (k, v) for k, v in options_dict.items()),)
-def _parse_query(database_engine: BaseDatabaseEngine, search_term: str) -> str:
- """Takes a plain unicode string from the user and converts it into a form
- that can be passed to database.
- We use this so that we can add prefix matching, which isn't something
- that is supported by default.
+@dataclass
+class Phrase:
+ phrase: List[str]
+
+
+class SearchToken(enum.Enum):
+ Not = enum.auto()
+ Or = enum.auto()
+ And = enum.auto()
+
+
+Token = Union[str, Phrase, SearchToken]
+TokenList = List[Token]
+
+
+def _is_stop_word(word: str) -> bool:
+ # TODO Pull these out of the dictionary:
+ # https://github.com/postgres/postgres/blob/master/src/backend/snowball/stopwords/english.stop
+ return word in {"the", "a", "you", "me", "and", "but"}
+
+
+def _tokenize_query(query: str) -> TokenList:
"""
+ Convert the user-supplied `query` into a TokenList, which can be translated into
+ some DB-specific syntax.
+
+ The following constructs are supported:
+
+ - phrase queries using "double quotes"
+ - case-insensitive `or` and `and` operators
+ - negation of a keyword via a unary `-`, e.g. 'include -exclude'
- # Pull out the individual words, discarding any non-word characters.
- results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+ The following differs from websearch_to_tsquery:
- if isinstance(database_engine, PostgresEngine):
- return " & ".join(result + ":*" for result in results)
- elif isinstance(database_engine, Sqlite3Engine):
- return " & ".join(result + "*" for result in results)
- else:
- # This should be unreachable.
- raise Exception("Unrecognized database engine")
+ - Stop words are not removed.
+ - Unclosed phrases are treated differently.
+
+ """
+ tokens: TokenList = []
+
+ # Find phrases.
+ in_phrase = False
+ parts = deque(query.split('"'))
+ for i, part in enumerate(parts):
+ # The contents inside double quotes are treated as a phrase.
+ in_phrase = bool(i % 2)
+
+ # Pull out the individual words, discarding any non-word characters.
+ words = deque(re.findall(r"([\w\-]+)", part, re.UNICODE))
+
+ # Phrases have simplified handling of words.
+ if in_phrase:
+ # Skip stop words.
+ phrase = [word for word in words if not _is_stop_word(word)]
+
+ # Consecutive words are implicitly ANDed together.
+ if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or):
+ tokens.append(SearchToken.And)
+
+ # Add the phrase.
+ tokens.append(Phrase(phrase))
+ continue
+
+ # Otherwise, not in a phrase.
+ while words:
+ word = words.popleft()
+
+ if word.startswith("-"):
+ tokens.append(SearchToken.Not)
+
+ # If there's more of the word left, put it back to be processed again.
+ word = word[1:]
+ if word:
+ words.appendleft(word)
+ elif word.lower() == "or":
+ tokens.append(SearchToken.Or)
+ else:
+ # Skip stop words.
+ if _is_stop_word(word):
+ continue
+
+ # Consecutive words are implicitly ANDed together.
+ if tokens and tokens[-1] not in (SearchToken.Not, SearchToken.Or):
+ tokens.append(SearchToken.And)
+
+ # Add the search term.
+ tokens.append(word)
+
+ return tokens
+
+
+def _tokens_to_sqlite_match_query(tokens: TokenList) -> str:
+ """
+ Convert the list of tokens to a string suitable for passing to sqlite's MATCH.
+ Assume sqlite was compiled with enhanced query syntax.
+
+ Ref: https://www.sqlite.org/fts3.html#full_text_index_queries
+ """
+ match_query = []
+ for token in tokens:
+ if isinstance(token, str):
+ match_query.append(token)
+ elif isinstance(token, Phrase):
+ match_query.append('"' + " ".join(token.phrase) + '"')
+ elif token == SearchToken.Not:
+ # TODO: SQLite treats NOT as a *binary* operator. Hopefully a search
+ # term has already been added before this.
+ match_query.append(" NOT ")
+ elif token == SearchToken.Or:
+ match_query.append(" OR ")
+ elif token == SearchToken.And:
+ match_query.append(" AND ")
+ else:
+ raise ValueError(f"unknown token {token}")
+
+ return "".join(match_query)
+
+
+def _parse_query_for_sqlite(search_term: str) -> str:
+ """Takes a plain unicode string from the user and converts it into a form
+ that can be passed to sqlite's matchinfo().
+ """
+ return _tokens_to_sqlite_match_query(_tokenize_query(search_term))
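A usage sketch of the SQLite path above; the expected values are hand-traced from the helpers, so treat them as illustrative:

    query = 'run -fast or "quick brown"'

    tokens = _tokenize_query(query)
    # -> ['run', SearchToken.Not, 'fast', SearchToken.Or, Phrase(phrase=['quick', 'brown'])]

    match_query = _parse_query_for_sqlite(query)
    # -> 'run NOT fast OR "quick brown"', ready for `value MATCH ?` under the
    #    enhanced FTS query syntax.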
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index f70705a0af..af7bebee80 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -23,6 +23,7 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
+from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -36,6 +37,7 @@ from synapse.storage.state import StateFilter
from synapse.types import JsonDict, JsonMapping, StateMap
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
@@ -142,6 +144,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
return room_version
+ @trace
async def get_metadata_for_events(
self, event_ids: Collection[str]
) -> Dict[str, EventMetadata]:
@@ -281,6 +284,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
# FIXME: how should this be cached?
+ @cancellable
async def get_partial_filtered_current_state_ids(
self, room_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
@@ -430,6 +434,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
updatevalues={"state_group": state_group},
)
+ # the event may now be rejected where it was not before, or vice versa,
+ # in which case we need to update the rejected flags.
+ if bool(context.rejected) != (event.rejected_reason is not None):
+ self.mark_event_rejected_txn(txn, event.event_id, context.rejected)
+
self.db_pool.simple_delete_one_txn(
txn,
table="partial_state_events",
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index b4c652acf3..356d4ca788 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -446,59 +446,41 @@ class StatsStore(StateDeltasStore):
absolutes: Absolute (set) fields
additive_relatives: Fields that will be added onto if existing row present.
"""
- if self.database_engine.can_native_upsert:
- absolute_updates = [
- "%(field)s = EXCLUDED.%(field)s" % {"field": field}
- for field in absolutes.keys()
- ]
-
- relative_updates = [
- "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
- % {"table": table, "field": field}
- for field in additive_relatives.keys()
- ]
-
- insert_cols = []
- qargs = []
-
- for (key, val) in chain(
- keyvalues.items(), absolutes.items(), additive_relatives.items()
- ):
- insert_cols.append(key)
- qargs.append(val)
+ absolute_updates = [
+ "%(field)s = EXCLUDED.%(field)s" % {"field": field}
+ for field in absolutes.keys()
+ ]
+
+ relative_updates = [
+ "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
+ % {"table": table, "field": field}
+ for field in additive_relatives.keys()
+ ]
+
+ insert_cols = []
+ qargs = []
+
+ for (key, val) in chain(
+ keyvalues.items(), absolutes.items(), additive_relatives.items()
+ ):
+ insert_cols.append(key)
+ qargs.append(val)
+
+ sql = """
+ INSERT INTO %(table)s (%(insert_cols_cs)s)
+ VALUES (%(insert_vals_qs)s)
+ ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
+ """ % {
+ "table": table,
+ "insert_cols_cs": ", ".join(insert_cols),
+ "insert_vals_qs": ", ".join(
+ ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
+ ),
+ "key_columns": ", ".join(keyvalues),
+ "updates": ", ".join(chain(absolute_updates, relative_updates)),
+ }
- sql = """
- INSERT INTO %(table)s (%(insert_cols_cs)s)
- VALUES (%(insert_vals_qs)s)
- ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
- """ % {
- "table": table,
- "insert_cols_cs": ", ".join(insert_cols),
- "insert_vals_qs": ", ".join(
- ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
- ),
- "key_columns": ", ".join(keyvalues),
- "updates": ", ".join(chain(absolute_updates, relative_updates)),
- }
-
- txn.execute(sql, qargs)
- else:
- self.database_engine.lock_table(txn, table)
- retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
- current_row = self.db_pool.simple_select_one_txn(
- txn, table, keyvalues, retcols, allow_none=True
- )
- if current_row is None:
- merged_dict = {**keyvalues, **absolutes, **additive_relatives}
- self.db_pool.simple_insert_txn(txn, table, merged_dict)
- else:
- for (key, val) in additive_relatives.items():
- if current_row[key] is None:
- current_row[key] = val
- else:
- current_row[key] += val
- current_row.update(absolutes)
- self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)
+ txn.execute(sql, qargs)
async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None:
"""Calculate and insert an entry into room_stats_current.
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 2590b52f73..cc27ec3804 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -58,6 +58,7 @@ from twisted.internet import defer
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -71,6 +72,7 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -355,6 +357,24 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]:
)
args.extend(event_filter.related_by_rel_types)
+ if event_filter.rel_types:
+ clauses.append(
+ "(%s)"
+ % " OR ".join(
+ "event_relation.relation_type = ?" for _ in event_filter.rel_types
+ )
+ )
+ args.extend(event_filter.rel_types)
+
+ if event_filter.not_rel_types:
+ clauses.append(
+ "((%s) OR event_relation.relation_type IS NULL)"
+ % " AND ".join(
+ "event_relation.relation_type != ?" for _ in event_filter.not_rel_types
+ )
+ )
+ args.extend(event_filter.not_rel_types)
+
return " AND ".join(clauses), args
@@ -395,6 +415,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
)
self._stream_order_on_start = self.get_room_max_stream_ordering()
+ self._min_stream_order_on_start = self.get_room_min_stream_ordering()
def get_room_max_stream_ordering(self) -> int:
"""Get the stream_ordering of regular events that we have committed up to
@@ -596,6 +617,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return ret, key
+ @cancellable
async def get_membership_changes_for_user(
self,
user_id: str,
@@ -1021,28 +1043,31 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"after": {"event_ids": events_after, "token": end_token},
}
- async def get_all_new_events_stream(
- self, from_id: int, current_id: int, limit: int, get_prev_content: bool = False
- ) -> Tuple[int, List[EventBase], Dict[str, Optional[int]]]:
+ async def get_all_new_event_ids_stream(
+ self,
+ from_id: int,
+ current_id: int,
+ limit: int,
+ ) -> Tuple[int, Dict[str, Optional[int]]]:
"""Get all new events
- Returns all events with from_id < stream_ordering <= current_id.
+ Returns all event ids with from_id < stream_ordering <= current_id.
Args:
from_id: the stream_ordering of the last event we processed
current_id: the stream_ordering of the most recently processed event
limit: the maximum number of events to return
- get_prev_content: whether to fetch previous event content
Returns:
- A tuple of (next_id, events, event_to_received_ts), where `next_id`
+ A tuple of (next_id, event_to_received_ts), where `next_id`
is the next value to pass as `from_id` (it will either be the
stream_ordering of the last returned event, or, if fewer than `limit`
events were found, the `current_id`). The `event_to_received_ts` is
- a dictionary mapping event ID to the event `received_ts`.
+ a dictionary mapping event ID to the event `received_ts`, sorted by ascending
+ stream_ordering.
"""
- def get_all_new_events_stream_txn(
+ def get_all_new_event_ids_stream_txn(
txn: LoggingTransaction,
) -> Tuple[int, Dict[str, Optional[int]]]:
sql = (
@@ -1067,15 +1092,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
return upper_bound, event_to_received_ts
upper_bound, event_to_received_ts = await self.db_pool.runInteraction(
- "get_all_new_events_stream", get_all_new_events_stream_txn
- )
-
- events = await self.get_events_as_list(
- event_to_received_ts.keys(),
- get_prev_content=get_prev_content,
+ "get_all_new_event_ids_stream", get_all_new_event_ids_stream_txn
)
- return upper_bound, events, event_to_received_ts
+ return upper_bound, event_to_received_ts
async def get_federation_out_pos(self, typ: str) -> int:
if self._need_to_reset_federation_stream_positions:
@@ -1199,8 +1219,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
`to_token`), or `limit` is zero.
"""
- assert int(limit) >= 0
-
# Tokens really represent positions between elements, but we use
# the convention of pointing to the event before the gap. Hence
# we have a bit of asymmetry when it comes to equalities.
@@ -1279,8 +1297,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
# Multiple labels could cause the same event to appear multiple times.
needs_distinct = True
- # If there is a filter on relation_senders and relation_types join to the
- # relations table.
+ # If there is a relation_senders and/or relation_types filter, join to the
+ # relations table to get events related to the current event.
if event_filter and (
event_filter.related_by_senders or event_filter.related_by_rel_types
):
@@ -1295,6 +1313,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
LEFT JOIN events AS related_event ON (relation.event_id = related_event.event_id)
"""
+ # If there is a rel_types and/or not_rel_types filter, join to the relations
+ # table to get the event's relation information.
+ if event_filter and (event_filter.rel_types or event_filter.not_rel_types):
+ join_clause += """
+ LEFT JOIN event_relations AS event_relation USING (event_id)
+ """
+
if needs_distinct:
select_keywords += " DISTINCT"
@@ -1331,21 +1356,22 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
if rows:
topo = rows[-1].topological_ordering
- toke = rows[-1].stream_ordering
+ token = rows[-1].stream_ordering
if direction == "b":
# Tokens are positions between events.
# This token points *after* the last event in the chunk.
# We need it to point to the event before it in the chunk
# when we are going backwards so we subtract one from the
# stream part.
- toke -= 1
- next_token = RoomStreamToken(topo, toke)
+ token -= 1
+ next_token = RoomStreamToken(topo, token)
else:
# TODO (erikj): We should work out what to do here instead.
next_token = to_token if to_token else from_token
return rows, next_token
+ @trace
async def paginate_room_events(
self,
room_id: str,
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index ba79e19f7f..f8c6877ee8 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -221,25 +221,15 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_interval: how long until next retry in ms
"""
- if self.database_engine.can_native_upsert:
- await self.db_pool.runInteraction(
- "set_destination_retry_timings",
- self._set_destination_retry_timings_native,
- destination,
- failure_ts,
- retry_last_ts,
- retry_interval,
- db_autocommit=True, # Safe as its a single upsert
- )
- else:
- await self.db_pool.runInteraction(
- "set_destination_retry_timings",
- self._set_destination_retry_timings_emulated,
- destination,
- failure_ts,
- retry_last_ts,
- retry_interval,
- )
+ await self.db_pool.runInteraction(
+ "set_destination_retry_timings",
+ self._set_destination_retry_timings_native,
+ destination,
+ failure_ts,
+ retry_last_ts,
+ retry_interval,
+ db_autocommit=True, # Safe as it's a single upsert
+ )
def _set_destination_retry_timings_native(
self,
@@ -249,8 +239,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_last_ts: int,
retry_interval: int,
) -> None:
- assert self.database_engine.can_native_upsert
-
# Upsert retry time interval if retry_interval is zero (i.e. we're
# resetting it) or greater than the existing retry interval.
#
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index ddb25b5cea..698d6f7515 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -185,9 +185,8 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
- who should be in the user_directory.
Args:
- progress (dict)
- batch_size (int): Maximum number of state events to process
- per cycle.
+ progress
+ batch_size: Maximum number of state events to process per cycle.
Returns:
number of events processed.
@@ -708,10 +707,10 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
Returns the rooms that a user is in.
Args:
- user_id(str): Must be a local user
+ user_id: Must be a local user
Returns:
- list: user_id
+ List of room IDs
"""
rows = await self.db_pool.simple_select_onecol(
table="users_who_share_private_rooms",
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index a7fcc564a9..4a4ad0f492 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -93,13 +93,6 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}
- where_clause, where_args = state_filter.make_sql_filter_clause()
-
- # Unless the filter clause is empty, we're going to append it after an
- # existing where clause
- if where_clause:
- where_clause = " AND (%s)" % (where_clause,)
-
if isinstance(self.database_engine, PostgresEngine):
# Temporarily disable sequential scans in this transaction. This is
# a temporary hack until we can add the right indices in
@@ -110,31 +103,91 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
# against `state_groups_state` to fetch the latest state.
# It assumes that previous state groups are always numerically
# lesser.
- # The PARTITION is used to get the event_id in the greatest state
- # group for the given type, state_key.
# This may return multiple rows per (type, state_key), but last_value
# should be the same.
sql = """
- WITH RECURSIVE state(state_group) AS (
+ WITH RECURSIVE sgs(state_group) AS (
VALUES(?::bigint)
UNION ALL
- SELECT prev_state_group FROM state_group_edges e, state s
+ SELECT prev_state_group FROM state_group_edges e, sgs s
WHERE s.state_group = e.state_group
)
- SELECT DISTINCT ON (type, state_key)
- type, state_key, event_id
- FROM state_groups_state
- WHERE state_group IN (
- SELECT state_group FROM state
- ) %s
- ORDER BY type, state_key, state_group DESC
+ %s
"""
+ overall_select_query_args: List[Union[int, str]] = []
+
+ # This is an optimization to create a select clause per-condition. This
+ # makes the query planner a lot smarter about which rows it should pull out
+ # in the first place, and we end up with something that takes 10x less time
+ # to produce a result.
+ use_condition_optimization = (
+ not state_filter.include_others and not state_filter.is_full()
+ )
+ state_filter_condition_combos: List[Tuple[str, Optional[str]]] = []
+ # We don't need to caclculate this list if we're not using the condition
+ # optimization
+ if use_condition_optimization:
+ for etype, state_keys in state_filter.types.items():
+ if state_keys is None:
+ state_filter_condition_combos.append((etype, None))
+ else:
+ for state_key in state_keys:
+ state_filter_condition_combos.append((etype, state_key))
+ # And here is the optimization itself. We don't want to do the optimization
+ # if there are too many individual conditions. 10 is an arbitrary number
+ # with no testing behind it, but we do know that we specifically made this
+ # optimization for when we grab the state needed by
+ # `filter_events_for_client`, which uses just 2 conditions
+ # (`EventTypes.RoomHistoryVisibility` and `EventTypes.Member`).
+ if use_condition_optimization and len(state_filter_condition_combos) < 10:
+ select_clause_list: List[str] = []
+ for etype, skey in state_filter_condition_combos:
+ if skey is None:
+ where_clause = "(type = ?)"
+ overall_select_query_args.extend([etype])
+ else:
+ where_clause = "(type = ? AND state_key = ?)"
+ overall_select_query_args.extend([etype, skey])
+
+ select_clause_list.append(
+ f"""
+ (
+ SELECT DISTINCT ON (type, state_key)
+ type, state_key, event_id
+ FROM state_groups_state
+ INNER JOIN sgs USING (state_group)
+ WHERE {where_clause}
+ ORDER BY type, state_key, state_group DESC
+ )
+ """
+ )
+
+ overall_select_clause = " UNION ".join(select_clause_list)
+ else:
+ where_clause, where_args = state_filter.make_sql_filter_clause()
+ # Unless the filter clause is empty, we're going to append it after an
+ # existing where clause
+ if where_clause:
+ where_clause = " AND (%s)" % (where_clause,)
+
+ overall_select_query_args.extend(where_args)
+
+ overall_select_clause = f"""
+ SELECT DISTINCT ON (type, state_key)
+ type, state_key, event_id
+ FROM state_groups_state
+ WHERE state_group IN (
+ SELECT state_group FROM sgs
+ ) {where_clause}
+ ORDER BY type, state_key, state_group DESC
+ """
+
for group in groups:
args: List[Union[int, str]] = [group]
- args.extend(where_args)
+ args.extend(overall_select_query_args)
- txn.execute(sql % (where_clause,), args)
+ txn.execute(sql % (overall_select_clause,), args)
for row in txn:
typ, state_key, event_id = row
key = (intern_string(typ), intern_string(state_key))
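A hand-traced sketch of what `overall_select_clause` looks like for the two-condition case mentioned above (`filter_events_for_client` wanting history visibility plus all members); the ordering of the branches follows the filter's dict order and is illustrative:

    overall_select_clause = """
        (
            SELECT DISTINCT ON (type, state_key)
                type, state_key, event_id
            FROM state_groups_state
            INNER JOIN sgs USING (state_group)
            WHERE (type = ? AND state_key = ?)
            ORDER BY type, state_key, state_group DESC
        )
        UNION
        (
            SELECT DISTINCT ON (type, state_key)
                type, state_key, event_id
            FROM state_groups_state
            INNER JOIN sgs USING (state_group)
            WHERE (type = ?)
            ORDER BY type, state_key, state_group DESC
        )
    """
    overall_select_query_args = ["m.room.history_visibility", "", "m.room.member"]
    # Each per-condition branch can hit the state_groups_state index directly instead
    # of filtering every row belonging to the state groups in `sgs`.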
@@ -142,6 +195,12 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
else:
max_entries_returned = state_filter.max_entries_returned()
+ where_clause, where_args = state_filter.make_sql_filter_clause()
+ # Unless the filter clause is empty, we're going to append it after an
+ # existing where clause
+ if where_clause:
+ where_clause = " AND (%s)" % (where_clause,)
+
# We don't use WITH RECURSIVE on sqlite3 as there are distributions
# that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
for group in groups:
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index bb64543c1f..f8cfcaca83 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -31,6 +31,7 @@ from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
+from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -156,6 +157,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"get_state_group_delta", _get_state_group_delta_txn
)
+ @cancellable
async def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
) -> Dict[int, StateMap[str]]:
@@ -235,6 +237,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
return state_filter.filter_state(state_dict_ids), not missing_types
+ @cancellable
async def _get_state_for_groups(
self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
) -> Dict[int, MutableStateMap[str]]:
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 971ff82693..70e594a68f 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -32,9 +32,10 @@ class IncorrectDatabaseSetup(RuntimeError):
ConnectionType = TypeVar("ConnectionType", bound=Connection)
+CursorType = TypeVar("CursorType", bound=Cursor)
-class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
+class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCMeta):
def __init__(self, module: DBAPI2Module, config: Mapping[str, Any]):
self.module = module
@@ -45,14 +46,6 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
- def can_native_upsert(self) -> bool:
- """
- Do we support native UPSERTs?
- """
- ...
-
- @property
- @abc.abstractmethod
def supports_using_any_list(self) -> bool:
"""
Do we support using `a = ANY(?)` and passing a list
@@ -72,7 +65,7 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
...
@abc.abstractmethod
- def check_new_database(self, txn: Cursor) -> None:
+ def check_new_database(self, txn: CursorType) -> None:
"""Gets called when setting up a brand new database. This allows us to
apply stricter checks on new databases versus existing database.
"""
@@ -132,3 +125,21 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
Note: This has no effect on SQLite3, as transactions are SERIALIZABLE by default.
"""
...
+
+ @staticmethod
+ @abc.abstractmethod
+ def executescript(cursor: CursorType, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ This is not provided by DBAPI2, and so needs engine-specific support.
+ """
+ ...
+
+ @classmethod
+ def execute_script_file(cls, cursor: CursorType, filepath: str) -> None:
+ """Execute a file containing multiple semicolon-delimited SQL statements.
+
+ This is not provided by DBAPI2, and so needs engine-specific support.
+ """
+ with open(filepath, "rt") as f:
+ cls.executescript(cursor, f.read())
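A minimal sketch of why the engine-specific hook is needed, using the standard-library SQLite driver and a simplified stand-in for a schema file:

    import sqlite3

    script = """
        CREATE TABLE IF NOT EXISTS schema_version (
            version INTEGER NOT NULL,
            upgraded BOOLEAN NOT NULL
        );
        INSERT INTO schema_version (version, upgraded) VALUES (73, 1);
    """

    cur = sqlite3.connect(":memory:").cursor()
    # DBAPI2 `execute()` refuses multi-statement strings on sqlite3, hence
    # `executescript`; psycopg2 happily runs the same text via plain `execute()`.
    cur.executescript(script)
    print(cur.execute("SELECT version, upgraded FROM schema_version").fetchone())  # (73, 1)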
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 517f9d5f98..719a517336 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -31,7 +31,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
+class PostgresEngine(
+ BaseDatabaseEngine[psycopg2.extensions.connection, psycopg2.extensions.cursor]
+):
def __init__(self, database_config: Mapping[str, Any]):
super().__init__(psycopg2, database_config)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
@@ -79,8 +81,8 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
# Are we on a supported PostgreSQL version?
- if not allow_outdated_version and self._version < 100000:
- raise RuntimeError("Synapse requires PostgreSQL 10 or above.")
+ if not allow_outdated_version and self._version < 110000:
+ raise RuntimeError("Synapse requires PostgreSQL 11 or above.")
with db_conn.cursor() as txn:
txn.execute("SHOW SERVER_ENCODING")
@@ -159,13 +161,6 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
db_conn.commit()
@property
- def can_native_upsert(self) -> bool:
- """
- Can we use native UPSERTs?
- """
- return True
-
- @property
def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list"""
return True
@@ -219,3 +214,11 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
else:
isolation_level = self.isolation_level_map[isolation_level]
return conn.set_isolation_level(isolation_level)
+
+ @staticmethod
+ def executescript(cursor: psycopg2.extensions.cursor, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ Psycopg2 seems happy to do this in DBAPI2's `execute()` function.
+ """
+ cursor.execute(script)
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 621f2c5efe..14260442b6 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -24,7 +24,7 @@ if TYPE_CHECKING:
from synapse.storage.database import LoggingDatabaseConnection
-class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
+class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
def __init__(self, database_config: Mapping[str, Any]):
super().__init__(sqlite3, database_config)
@@ -49,14 +49,6 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
return True
@property
- def can_native_upsert(self) -> bool:
- """
- Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
- more work we haven't done yet to tell what was inserted vs updated.
- """
- return sqlite3.sqlite_version_info >= (3, 24, 0)
-
- @property
def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list"""
return False
@@ -70,12 +62,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False
) -> None:
if not allow_outdated_version:
- version = sqlite3.sqlite_version_info
# Synapse is untested against older SQLite versions, and we don't want
# to let users upgrade to a version of Synapse with broken support for their
# sqlite version, because it risks leaving them with a half-upgraded db.
- if version < (3, 22, 0):
- raise RuntimeError("Synapse requires sqlite 3.22 or above.")
+ if sqlite3.sqlite_version_info < (3, 27, 0):
+ raise RuntimeError("Synapse requires sqlite 3.27 or above.")
def check_new_database(self, txn: Cursor) -> None:
"""Gets called when setting up a brand new database. This allows us to
@@ -97,6 +88,10 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
db_conn.create_function("rank", 1, _rank)
db_conn.execute("PRAGMA foreign_keys = ON;")
+
+ # Enable WAL.
+ # see https://www.sqlite.org/wal.html
+ db_conn.execute("PRAGMA journal_mode = WAL;")
db_conn.commit()
def is_deadlock(self, error: Exception) -> bool:
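A tiny standalone sketch of the WAL pragma for an on-disk database (the path is illustrative); WAL lets readers proceed concurrently with a single writer, which suits Synapse's access pattern:

    import sqlite3

    conn = sqlite3.connect("homeserver.db")  # illustrative path
    conn.execute("PRAGMA foreign_keys = ON;")
    # On-disk databases report "wal"; in-memory databases fall back to "memory".
    print(conn.execute("PRAGMA journal_mode = WAL;").fetchone()[0])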
@@ -129,6 +124,25 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
# All transactions are SERIALIZABLE by default in sqlite
pass
+ @staticmethod
+ def executescript(cursor: sqlite3.Cursor, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ Python's built-in SQLite driver does not allow you to do this with DBAPI2's
+ `execute`:
+
+ > execute() will only execute a single SQL statement. If you try to execute more
+ > than one statement with it, it will raise a Warning. Use executescript() if
+ > you want to execute multiple SQL statements with one call.
+
+ Though the docs for `executescript` warn:
+
+ > If there is a pending transaction, an implicit COMMIT statement is executed
+ > first. No other implicit transaction control is performed; any transaction
+ > control must be added to sql_script.
+ """
+ cursor.executescript(script)
+
# Following functions taken from: https://github.com/coleifer/peewee
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 09a2b58f4c..3acdb39da7 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -266,7 +266,7 @@ def _setup_new_database(
".sql." + specific
):
logger.debug("Applying schema %s", entry.absolute_path)
- executescript(cur, entry.absolute_path)
+ database_engine.execute_script_file(cur, entry.absolute_path)
cur.execute(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)",
@@ -517,7 +517,7 @@ def _upgrade_existing_database(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying schema %s", relative_path)
- executescript(cur, absolute_path)
+ database_engine.execute_script_file(cur, absolute_path)
elif ext == specific_engine_extension and root_name.endswith(".sql"):
# A .sql file specific to our engine; just read and execute it
if is_worker:
@@ -525,7 +525,7 @@ def _upgrade_existing_database(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying engine-specific schema %s", relative_path)
- executescript(cur, absolute_path)
+ database_engine.execute_script_file(cur, absolute_path)
elif ext in specific_engine_extensions and root_name.endswith(".sql"):
# A .sql file for a different engine; skip it.
continue
@@ -666,7 +666,7 @@ def _get_or_create_schema_state(
) -> Optional[_SchemaState]:
# Bluntly try creating the schema_version tables.
sql_path = os.path.join(schema_path, "common", "schema_version.sql")
- executescript(txn, sql_path)
+ database_engine.execute_script_file(txn, sql_path)
txn.execute("SELECT version, upgraded FROM schema_version")
row = txn.fetchone()
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index a9a88c8bfd..19dbf2da7f 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 72 # remember to update the list below when updating
+SCHEMA_VERSION = 73 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -75,13 +75,24 @@ Changes in SCHEMA_VERSION = 71:
Changes in SCHEMA_VERSION = 72:
- event_edges.(room_id, is_state) are no longer written to.
- Tables related to groups are dropped.
+ - Unused column application_services_state.last_txn is dropped.
+ - Cache invalidation stream id sequence now begins at 2 to match code expectation.
+
+Changes in SCHEMA_VERSION = 73:
+ - thread_id column is added to event_push_actions, event_push_actions_staging,
+ event_push_summary, receipts_linearized, and receipts_graph.
+ - Add table `event_failed_pull_attempts` to keep track of when we fail to pull
+ events over federation.
+ - Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`,
+ `batch_events`) to make it easy to delete all associated rows when purging a room.
+ - `inserted_ts` column is added to `event_push_actions_staging` table.
"""
SCHEMA_COMPAT_VERSION = (
- # The groups tables are no longer accessible, so synapses with SCHEMA_VERSION < 72
- # could break.
- 72
+ # The thread_id column must exist for event_push_actions, event_push_summary,
+ # receipts_linearized, and receipts_graph.
+ 73
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/synapse/storage/schema/common/full_schemas/72/full.sql.postgres b/synapse/storage/schema/common/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..f0e546f052
--- /dev/null
+++ b/synapse/storage/schema/common/full_schemas/72/full.sql.postgres
@@ -0,0 +1,8 @@
+CREATE TABLE background_updates (
+ update_name text NOT NULL,
+ progress_json text NOT NULL,
+ depends_on text,
+ ordering integer DEFAULT 0 NOT NULL
+);
+ALTER TABLE ONLY background_updates
+ ADD CONSTRAINT background_updates_uniqueness UNIQUE (update_name);
diff --git a/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..d5a2c04aa9
--- /dev/null
+++ b/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,6 @@
+CREATE TABLE background_updates (
+ update_name text NOT NULL,
+ progress_json text NOT NULL,
+ depends_on text, ordering INT NOT NULL DEFAULT 0,
+ CONSTRAINT background_updates_uniqueness UNIQUE (update_name)
+);
diff --git a/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres
new file mode 100644
index 0000000000..13d47de9e6
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.postgres
@@ -0,0 +1,17 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Drop unused column application_services_state.last_txn
+ALTER TABLE application_services_state DROP COLUMN last_txn;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite
new file mode 100644
index 0000000000..3be1c88d72
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/04drop_column_application_services_state_last_txn.sql.sqlite
@@ -0,0 +1,40 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Drop unused column application_services_state.last_txn
+
+CREATE TABLE application_services_state2 (
+ as_id TEXT PRIMARY KEY NOT NULL,
+ state VARCHAR(5),
+ read_receipt_stream_id BIGINT,
+ presence_stream_id BIGINT,
+ to_device_stream_id BIGINT,
+ device_list_stream_id BIGINT
+);
+
+
+INSERT INTO application_services_state2 (
+ as_id,
+ state,
+ read_receipt_stream_id,
+ presence_stream_id,
+ to_device_stream_id,
+ device_list_stream_id
+)
+SELECT as_id, state, read_receipt_stream_id, presence_stream_id, to_device_stream_id, device_list_stream_id
+FROM application_services_state;
+
+DROP TABLE application_services_state;
+ALTER TABLE application_services_state2 RENAME TO application_services_state;
diff --git a/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql b/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql
new file mode 100644
index 0000000000..2a822f4509
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/05receipts_event_stream_ordering.sql
@@ -0,0 +1,19 @@
+/* Copyright 2022 Beeper
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE receipts_linearized ADD COLUMN event_stream_ordering BIGINT;
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('populate_event_stream_ordering', '{}');
diff --git a/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql b/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql
new file mode 100644
index 0000000000..36b41049cd
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/05remove_unstable_private_read_receipts.sql
@@ -0,0 +1,19 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Drop previously received private read receipts so they do not accidentally
+-- get leaked to other users.
+DELETE FROM receipts_linearized WHERE receipt_type = 'org.matrix.msc2285.read.private';
+DELETE FROM receipts_graph WHERE receipt_type = 'org.matrix.msc2285.read.private';
diff --git a/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql b/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql
new file mode 100644
index 0000000000..609eb1750f
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/06add_consent_ts_to_users.sql
@@ -0,0 +1,16 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE users ADD consent_ts bigint;
diff --git a/synapse/storage/schema/main/delta/72/06thread_notifications.sql b/synapse/storage/schema/main/delta/72/06thread_notifications.sql
new file mode 100644
index 0000000000..2f4f5dac7a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/06thread_notifications.sql
@@ -0,0 +1,30 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a nullable column for thread ID to the event push actions tables; this
+-- will be filled in with a default value for any previously existing rows.
+--
+-- After migration this can be made non-nullable.
+
+ALTER TABLE event_push_actions_staging ADD COLUMN thread_id TEXT;
+ALTER TABLE event_push_actions ADD COLUMN thread_id TEXT;
+ALTER TABLE event_push_summary ADD COLUMN thread_id TEXT;
+
+-- Update the unique index for `event_push_summary`.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7006, 'event_push_summary_unique_index2', '{}');
+
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7006, 'event_push_backfill_thread_id', '{}', 'event_push_summary_unique_index2');
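+
+-- (The depends_on value above means 'event_push_backfill_thread_id' is held back
+-- until 'event_push_summary_unique_index2' has completed.)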
diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
new file mode 100644
index 0000000000..b5853d125c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
@@ -0,0 +1,52 @@
+# Copyright 2022 Beeper
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Forces through the `current_state_events_membership` background job so checks
+for its completion can be removed.
+
+Note the background job must still remain defined in the database class.
+"""
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ cur.execute("SELECT update_name FROM background_updates")
+ rows = cur.fetchall()
+ for row in rows:
+ if row[0] == "current_state_events_membership":
+ break
+ # No pending background job so nothing to do here
+ else:
+ return
+
+ # Populate the membership field for all current_state_events; this may take
+ # a while, but was originally handled via a background update in 2019.
+ cur.execute(
+ """
+ UPDATE current_state_events
+ SET membership = (
+ SELECT membership FROM room_memberships
+ WHERE event_id = current_state_events.event_id
+ )
+ """
+ )
+
+ # Finally, delete the background job because we've handled it above
+ cur.execute(
+ """
+ DELETE FROM background_updates
+ WHERE update_name = 'current_state_events_membership'
+ """
+ )
diff --git a/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres
new file mode 100644
index 0000000000..55fff9e278
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.postgres
@@ -0,0 +1,30 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a nullable column for thread ID to the receipts table; this allows a
+-- receipt per user, per room, as well as an unthreaded receipt (corresponding
+-- to a null thread ID).
+
+ALTER TABLE receipts_linearized ADD COLUMN thread_id TEXT;
+ALTER TABLE receipts_graph ADD COLUMN thread_id TEXT;
+
+-- Rebuild the unique constraint with the thread_id.
+ALTER TABLE receipts_linearized
+ ADD CONSTRAINT receipts_linearized_uniqueness_thread
+ UNIQUE (room_id, receipt_type, user_id, thread_id);
+
+ALTER TABLE receipts_graph
+ ADD CONSTRAINT receipts_graph_uniqueness_thread
+ UNIQUE (room_id, receipt_type, user_id, thread_id);
diff --git a/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite
new file mode 100644
index 0000000000..232f67deb4
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/07thread_receipts.sql.sqlite
@@ -0,0 +1,70 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Allow multiple receipts per user per room via a nullable thread_id column.
+--
+-- SQLite doesn't support modifying the constraints of an existing table, so it
+-- must be recreated.
+
+-- Create the new tables.
+CREATE TABLE receipts_linearized_new (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ thread_id TEXT,
+ event_stream_ordering BIGINT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+
+CREATE TABLE receipts_graph_new (
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_ids TEXT NOT NULL,
+ thread_id TEXT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+
+-- Drop the old indexes.
+DROP INDEX IF EXISTS receipts_linearized_id;
+DROP INDEX IF EXISTS receipts_linearized_room_stream;
+DROP INDEX IF EXISTS receipts_linearized_user;
+
+-- Copy the data.
+INSERT INTO receipts_linearized_new (stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data)
+ SELECT stream_id, room_id, receipt_type, user_id, event_id, event_stream_ordering, data
+ FROM receipts_linearized;
+INSERT INTO receipts_graph_new (room_id, receipt_type, user_id, event_ids, data)
+ SELECT room_id, receipt_type, user_id, event_ids, data
+ FROM receipts_graph;
+
+-- Drop the old tables.
+DROP TABLE receipts_linearized;
+DROP TABLE receipts_graph;
+
+-- Rename the tables.
+ALTER TABLE receipts_linearized_new RENAME TO receipts_linearized;
+ALTER TABLE receipts_graph_new RENAME TO receipts_graph;
+
+-- Create the indices.
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
+CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
diff --git a/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres b/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres
new file mode 100644
index 0000000000..69931fe971
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/08begin_cache_invalidation_seq_at_2.sql.postgres
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- The sequence needs to begin at 2 because a bunch of code assumes that
+-- get_next_id_txn will return values >= 2, cf this comment:
+-- https://github.com/matrix-org/synapse/blob/b93bd95e8ab64d27ae26841020f62ee61272a5f2/synapse/storage/util/id_generators.py#L344
+
+SELECT setval('cache_invalidation_stream_seq', (
+ SELECT COALESCE(MAX(last_value), 1) FROM cache_invalidation_stream_seq
+));
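+
+-- For illustration (assumed values): on a fresh database last_value is 1 and the
+-- sequence has never been called, so nextval() would have returned 1; the setval()
+-- above marks it as called, so the first nextval() now returns 2. On a database
+-- where the sequence has already reached e.g. 57, the next value stays 58, so
+-- behaviour for existing databases is unchanged.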
diff --git a/synapse/storage/schema/main/delta/72/08thread_receipts.sql b/synapse/storage/schema/main/delta/72/08thread_receipts.sql
new file mode 100644
index 0000000000..e35b021f31
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/08thread_receipts.sql
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7007, 'receipts_linearized_unique_index', '{}');
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7007, 'receipts_graph_unique_index', '{}');
diff --git a/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite b/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite
new file mode 100644
index 0000000000..c8dfdf0218
--- /dev/null
+++ b/synapse/storage/schema/main/delta/72/09partial_indices.sql.sqlite
@@ -0,0 +1,56 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- SQLite needs to rebuild indices which use partial indices on Postgres, but
+-- previously did not use them on SQLite.
+
+-- Drop each index that was added with register_background_index_update AND specified
+-- a where_clause (that existed before this delta).
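+--
+-- For illustration only (hypothetical index, not one of the ones below): a partial
+-- index is one declared with a WHERE clause, e.g.
+--   CREATE INDEX example_idx ON events(room_id) WHERE contains_url;
+-- The SQLite versions of the indexes below were previously created without their
+-- WHERE clause; dropping them and re-running the background updates recreates them
+-- with it.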
+
+-- From events_bg_updates.py
+DROP INDEX IF EXISTS event_contains_url_index;
+-- There is also a redactions_censored_redacts index, but that gets dropped.
+DROP INDEX IF EXISTS redactions_have_censored_ts;
+-- There is also a PostgreSQL-only index (event_contains_url_index2)
+-- which gets renamed to event_contains_url_index.
+
+-- From roommember.py
+DROP INDEX IF EXISTS room_memberships_user_room_forgotten;
+
+-- From presence.py
+DROP INDEX IF EXISTS presence_stream_state_not_offline_idx;
+
+-- From media_repository.py
+DROP INDEX IF EXISTS local_media_repository_url_idx;
+
+-- From event_push_actions.py
+DROP INDEX IF EXISTS event_push_actions_highlights_index;
+-- There's also an event_push_actions_stream_highlight_index which was previously
+-- PostgreSQL-only.
+
+-- From state.py
+DROP INDEX IF EXISTS current_state_events_member_index;
+
+-- Re-insert the background jobs to re-create the indices.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7209, 'event_contains_url_index', '{}', NULL),
+ (7209, 'redactions_have_censored_ts_idx', '{}', NULL),
+ (7209, 'room_membership_forgotten_idx', '{}', NULL),
+ (7209, 'presence_stream_not_offline_index', '{}', NULL),
+ (7209, 'local_media_repository_url_idx', '{}', NULL),
+ (7209, 'event_push_actions_highlights_index', '{}', NULL),
+ (7209, 'event_push_actions_stream_highlight_index', '{}', NULL),
+ (7209, 'current_state_members_idx', '{}', NULL)
+ON CONFLICT (update_name) DO NOTHING;
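+
+-- (The ON CONFLICT ... DO NOTHING upsert clause needs SQLite 3.24+, which is
+-- satisfied by the 3.27 minimum enforced by the engine check above.)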
diff --git a/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql b/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql
new file mode 100644
index 0000000000..d397ee1082
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/01event_failed_pull_attempts.sql
@@ -0,0 +1,29 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- Add a table that keeps track of when we failed to pull an event over
+-- federation (via /backfill, `/event`, `/get_missing_events`, etc). This allows
+-- us to be more intelligent when we decide to retry (we don't need to fail over
+-- and over) and we can process that event in the background so we don't block
+-- on it each time.
+CREATE TABLE IF NOT EXISTS event_failed_pull_attempts(
+ room_id TEXT NOT NULL REFERENCES rooms (room_id),
+ event_id TEXT NOT NULL,
+ num_attempts INT NOT NULL,
+ last_attempt_ts BIGINT NOT NULL,
+ last_cause TEXT NOT NULL,
+ PRIMARY KEY (room_id, event_id)
+);
diff --git a/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql
new file mode 100644
index 0000000000..dba3b4900b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql
@@ -0,0 +1,16 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE pushers ADD COLUMN enabled BOOLEAN;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql b/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql
new file mode 100644
index 0000000000..6d38bdd430
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql
@@ -0,0 +1,22 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add index so we can easily purge all rows from a given `room_id`
+CREATE INDEX IF NOT EXISTS event_failed_pull_attempts_room_id ON event_failed_pull_attempts(room_id);
+
+-- MSC2716 related tables:
+-- Add indexes so we can easily purge all rows from a given `room_id`
+CREATE INDEX IF NOT EXISTS insertion_events_room_id ON insertion_events(room_id);
+CREATE INDEX IF NOT EXISTS batch_events_room_id ON batch_events(room_id);
diff --git a/synapse/storage/schema/main/delta/73/03pusher_device_id.sql b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql
new file mode 100644
index 0000000000..1b4ffbeebe
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a device_id column to track the device ID that created the pusher. It's NULLable
+-- on purpose, because a) it might not be possible to track down the device that created
+-- old pushers (pushers.access_token and access_tokens.device_id are both NULLable), and
+-- b) access tokens retrieved via the admin API don't have a device associated with them.
+ALTER TABLE pushers ADD COLUMN device_id TEXT;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/73/03users_approved_column.sql b/synapse/storage/schema/main/delta/73/03users_approved_column.sql
new file mode 100644
index 0000000000..5328d592ea
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/03users_approved_column.sql
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column to the users table to track whether the user needs to be approved by an
+-- administrator.
+-- A NULL column means the user was created before this feature was supported by Synapse,
+-- and should be considered as TRUE.
+ALTER TABLE users ADD COLUMN approved BOOLEAN;
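+
+-- Illustrative only (not part of the migration): given the NULL-means-TRUE rule
+-- above, a count of approved users would look like
+--   SELECT COUNT(*) FROM users WHERE approved IS NULL OR approved;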
diff --git a/synapse/storage/schema/main/delta/73/04partial_join_details.sql b/synapse/storage/schema/main/delta/73/04partial_join_details.sql
new file mode 100644
index 0000000000..5fb2bfe1a2
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/04partial_join_details.sql
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- To ensure we correctly notify other homeservers about device list changes from our
+-- users after a partial join transitions to a full join, we need to know when we began
+-- the partial join. For now it's sufficient to know the device_list stream_id at the
+-- time of the partial join, and the join event created for us during a partial join.
+--
+-- Both columns are backwards compatible.
+ALTER TABLE partial_state_rooms ADD COLUMN device_lists_stream_id BIGINT NOT NULL DEFAULT 0;
+ALTER TABLE partial_state_rooms ADD COLUMN join_event_id TEXT REFERENCES events(event_id);
diff --git a/synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql b/synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql
new file mode 100644
index 0000000000..dbd78d677d
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/04pending_device_list_updates.sql
@@ -0,0 +1,28 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Stores remote device lists we have received for remote users while a partial
+-- join is in progress.
+--
+-- This allows us to replay any device list updates if it turns out the remote
+-- user was in the partially joined room.
+CREATE TABLE device_lists_remote_pending(
+ stream_id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL
+);
+
+-- We only keep the most recent update for a given user/device pair.
+CREATE UNIQUE INDEX device_lists_remote_pending_user_device_id ON device_lists_remote_pending(user_id, device_id);
diff --git a/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres
new file mode 100644
index 0000000000..4af1a8470b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.postgres
@@ -0,0 +1,22 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column so that we know when a push action was inserted, to make it
+-- easier to clear out old ones.
+ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT;
+
+-- We now add a default for *new* rows. We don't do this above as we don't want
+-- to have to update every row with the new default.
+ALTER TABLE event_push_actions_staging ALTER COLUMN inserted_ts SET DEFAULT extract(epoch from now()) * 1000;
diff --git a/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite
new file mode 100644
index 0000000000..7482dabba2
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/05old_push_actions.sql.sqlite
@@ -0,0 +1,24 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- On SQLite we must be in monolith mode and updating the database from Synapse,
+-- so it's safe to assume that `event_push_actions_staging` should be empty (as
+-- over a restart an event must either have been fully persisted or we'll
+-- recalculate the push actions).
+DELETE FROM event_push_actions_staging;
+
+-- Add a column so that we know when a push action was inserted, to make it
+-- easier to clear out old ones.
+ALTER TABLE event_push_actions_staging ADD COLUMN inserted_ts BIGINT;
diff --git a/synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql b/synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql
new file mode 100644
index 0000000000..8b3c636594
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/06thread_notifications_thread_id_idx.sql
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Allow there to be multiple summaries per user/room.
+DROP INDEX IF EXISTS event_push_summary_unique_index;
+
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7306, 'event_push_actions_thread_id_null', '{}', 'event_push_backfill_thread_id');
+
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7306, 'event_push_summary_thread_id_null', '{}', 'event_push_backfill_thread_id');
diff --git a/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres b/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres
new file mode 100644
index 0000000000..3e0bc9e5eb
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.postgres
@@ -0,0 +1,23 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Drop constraint on (room_id, receipt_type, user_id).
+
+-- Rebuild the unique constraint with the thread_id.
+ALTER TABLE receipts_linearized
+ DROP CONSTRAINT receipts_linearized_uniqueness;
+
+ALTER TABLE receipts_graph
+ DROP CONSTRAINT receipts_graph_uniqueness;
diff --git a/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite b/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite
new file mode 100644
index 0000000000..e664889fbc
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/08thread_receipts_non_null.sql.sqlite
@@ -0,0 +1,76 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Drop constraint on (room_id, receipt_type, user_id).
+--
+-- SQLite doesn't support modifying the constraints of an existing table, so it
+-- must be recreated.
+
+-- Create the new tables.
+CREATE TABLE receipts_linearized_new (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ thread_id TEXT,
+ event_stream_ordering BIGINT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+
+CREATE TABLE receipts_graph_new (
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_ids TEXT NOT NULL,
+ thread_id TEXT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+
+-- Drop the old indexes.
+DROP INDEX IF EXISTS receipts_linearized_id;
+DROP INDEX IF EXISTS receipts_linearized_room_stream;
+DROP INDEX IF EXISTS receipts_linearized_user;
+
+-- Copy the data.
+INSERT INTO receipts_linearized_new (stream_id, room_id, receipt_type, user_id, event_id, data)
+ SELECT stream_id, room_id, receipt_type, user_id, event_id, data
+ FROM receipts_linearized;
+INSERT INTO receipts_graph_new (room_id, receipt_type, user_id, event_ids, data)
+ SELECT room_id, receipt_type, user_id, event_ids, data
+ FROM receipts_graph;
+
+-- Drop the old tables.
+DROP TABLE receipts_linearized;
+DROP TABLE receipts_graph;
+
+-- Rename the tables.
+ALTER TABLE receipts_linearized_new RENAME TO receipts_linearized;
+ALTER TABLE receipts_graph_new RENAME TO receipts_graph;
+
+-- Create the indices.
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
+CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
+
+-- Re-run background updates from 72/08thread_receipts.sql.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7308, 'receipts_linearized_unique_index', '{}')
+ ON CONFLICT (update_name) DO NOTHING;
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7308, 'receipts_graph_unique_index', '{}')
+ ON CONFLICT (update_name) DO NOTHING;
diff --git a/synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql b/synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql
new file mode 100644
index 0000000000..066d602b18
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/09partial_joined_via_destination.sql
@@ -0,0 +1,18 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- When we resync partial state, we prioritise doing so using the server we
+-- partial-joined from. To do this we need to record which server that was!
+ALTER TABLE partial_state_rooms ADD COLUMN joined_via TEXT;
diff --git a/synapse/storage/schema/main/delta/73/09threads_table.sql b/synapse/storage/schema/main/delta/73/09threads_table.sql
new file mode 100644
index 0000000000..aa7c5e9a2e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/09threads_table.sql
@@ -0,0 +1,30 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE threads (
+ room_id TEXT NOT NULL,
+ -- The event ID of the root event in the thread.
+ thread_id TEXT NOT NULL,
+ -- The latest event ID and corresponding topo / stream ordering.
+ latest_event_id TEXT NOT NULL,
+ topological_ordering BIGINT NOT NULL,
+ stream_ordering BIGINT NOT NULL,
+ CONSTRAINT threads_uniqueness UNIQUE (room_id, thread_id)
+);
+
+CREATE INDEX threads_ordering_idx ON threads(room_id, topological_ordering, stream_ordering);
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7309, 'threads_backfill', '{}');
diff --git a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py
new file mode 100644
index 0000000000..3de0a709eb
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py
@@ -0,0 +1,62 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+
+from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine
+from synapse.storage.types import Cursor
+
+
+def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None:
+ """
+ Upgrade the event_search table to use the porter tokenizer if it isn't already using it.
+
+ Applies only for sqlite.
+ """
+ if not isinstance(database_engine, Sqlite3Engine):
+ return
+
+ # Rebuild the event_search table with tokenize=porter configured.
+ cur.execute("DROP TABLE event_search")
+ cur.execute(
+ """
+ CREATE VIRTUAL TABLE event_search
+ USING fts4 (tokenize=porter, event_id, room_id, sender, key, value )
+ """
+ )
+
+ # Re-run the background job to re-populate the event_search table.
+ cur.execute("SELECT MIN(stream_ordering) FROM events")
+ row = cur.fetchone()
+ min_stream_id = row[0]
+
+ # If there are no events, there is nothing to do.
+ if min_stream_id is None:
+ return
+
+ cur.execute("SELECT MAX(stream_ordering) FROM events")
+ row = cur.fetchone()
+ max_stream_id = row[0]
+
+ progress = {
+ "target_min_stream_id_inclusive": min_stream_id,
+ "max_stream_id_exclusive": max_stream_id + 1,
+ }
+ progress_json = json.dumps(progress)
+
+ sql = """
+ INSERT into background_updates (ordering, update_name, progress_json)
+ VALUES (?, ?, ?)
+ """
+
+ cur.execute(sql, (7310, "event_search", progress_json))
diff --git a/synapse/storage/schema/main/delta/73/10login_tokens.sql b/synapse/storage/schema/main/delta/73/10login_tokens.sql
new file mode 100644
index 0000000000..a39b7bcece
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/10login_tokens.sql
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Login tokens are short-lived tokens that are used for the m.login.token
+-- login method, mainly during SSO logins.
+CREATE TABLE login_tokens (
+ token TEXT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ expiry_ts BIGINT NOT NULL,
+ used_ts BIGINT,
+ auth_provider_id TEXT,
+ auth_provider_session_id TEXT
+);
+
+-- We sometimes query them by the session ID we got from their IdP
+CREATE INDEX login_tokens_auth_provider_idx
+ ON login_tokens (auth_provider_id, auth_provider_session_id);
+
+-- We're deleting them by their expiration time
+CREATE INDEX login_tokens_expiry_time_idx
+ ON login_tokens (expiry_ts);
+
diff --git a/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres b/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres
new file mode 100644
index 0000000000..93cdaefca1
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/11event_search_room_id_n_distinct.sql.postgres
@@ -0,0 +1,33 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- By default the postgres statistics collector massively underestimates the
+-- number of distinct rooms in `event_search`, which can cause postgres to use
+-- table scans for queries for multiple rooms.
+--
+-- To work around this we can manually tell postgres the number of distinct rooms
+-- by setting `n_distinct` (a negative value here is the number of distinct values
+-- divided by the number of rows, so -0.01 means on average there are 100 rows per
+-- distinct value). We don't need a particularly accurate number here, as a) we just
+-- want it to always use index scans and b) our estimate is going to be better than the
+-- one made by the statistics collector.
+
+ALTER TABLE event_search ALTER COLUMN room_id SET (n_distinct = -0.01);
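+
+-- Worked example (assumed figures): with 100 million rows in event_search,
+-- n_distinct = -0.01 makes the planner estimate 0.01 * 100,000,000 = 1,000,000
+-- distinct room_ids, i.e. about 100 rows per room on average, which is enough to
+-- favour an index scan when querying a handful of rooms.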
+
+-- Ideally we'd do an `ANALYZE event_search (room_id)` here so that
+-- the above gets picked up immediately, but that can take a bit of time so we
+-- rely on the autovacuum eventually getting run and doing that in the
+-- background for us.
diff --git a/synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql b/synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql
new file mode 100644
index 0000000000..93d7fcb79b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/12refactor_device_list_outbound_pokes.sql
@@ -0,0 +1,53 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Prior to this schema delta, we tracked the set of unconverted rows in
+-- `device_lists_changes_in_room` using the `converted_to_destinations` flag. When rows
+-- were converted to `device_lists_outbound_pokes`, the `converted_to_destinations` flag
+-- would be set.
+--
+-- After this schema delta, the `converted_to_destinations` is still populated like
+-- before, but the set of unconverted rows is determined by the `stream_id` in the new
+-- `device_lists_changes_converted_stream_position` table.
+--
+-- If rolled back, Synapse will re-send all device list changes that happened since the
+-- schema delta.
+
+CREATE TABLE IF NOT EXISTS device_lists_changes_converted_stream_position(
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ -- The (stream id, room id) of the last row in `device_lists_changes_in_room` that
+ -- has been converted to `device_lists_outbound_pokes`. Rows with a strictly larger
+ -- (stream id, room id) where `converted_to_destinations` is `FALSE` have not been
+ -- converted.
+ stream_id BIGINT NOT NULL,
+ -- `room_id` may be an empty string, which compares less than all valid room IDs.
+ room_id TEXT NOT NULL,
+ CHECK (Lock='X')
+);
+
+INSERT INTO device_lists_changes_converted_stream_position (stream_id, room_id) VALUES (
+ (
+ SELECT COALESCE(
+ -- The last converted stream id is the smallest unconverted stream id minus
+ -- one.
+ MIN(stream_id) - 1,
+ -- If there is no unconverted stream id, the last converted stream id is the
+ -- largest stream id.
+ -- If the table is empty, fall back to 1, since stream ids start at 2.
+ (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_changes_in_room)
+ ) FROM device_lists_changes_in_room WHERE NOT converted_to_destinations
+ ),
+ ''
+);
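+
+-- Worked examples (assumed data, for illustration only):
+--  * unconverted rows with stream_ids {7, 9} exist -> the position is recorded as 6;
+--  * every row is converted and the largest stream_id is 10 -> the position is 10;
+--  * device_lists_changes_in_room is empty -> the position falls back to 1.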
diff --git a/synapse/storage/schema/main/delta/73/13add_device_lists_index.sql b/synapse/storage/schema/main/delta/73/13add_device_lists_index.sql
new file mode 100644
index 0000000000..3725022a13
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/13add_device_lists_index.sql
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- Adds an index on `device_lists_changes_in_room (room_id, stream_id)`, which
+-- speeds up `/sync` queries.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7313, 'device_lists_changes_in_room_by_room_index', '{}');
diff --git a/synapse/storage/schema/main/full_schemas/72/full.sql.postgres b/synapse/storage/schema/main/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..d421fd9ab9
--- /dev/null
+++ b/synapse/storage/schema/main/full_schemas/72/full.sql.postgres
@@ -0,0 +1,1344 @@
+CREATE FUNCTION check_partial_state_events() RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ IF EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.room_id != NEW.room_id
+ ) THEN
+ RAISE EXCEPTION 'Incorrect room_id in partial_state_events';
+ END IF;
+ RETURN NEW;
+ END;
+ $$;
+CREATE TABLE access_tokens (
+ id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text,
+ token text NOT NULL,
+ valid_until_ms bigint,
+ puppets_user_id text,
+ last_validated bigint,
+ refresh_token_id bigint,
+ used boolean
+);
+CREATE TABLE account_data (
+ user_id text NOT NULL,
+ account_data_type text NOT NULL,
+ stream_id bigint NOT NULL,
+ content text NOT NULL,
+ instance_name text
+);
+CREATE SEQUENCE account_data_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE account_validity (
+ user_id text NOT NULL,
+ expiration_ts_ms bigint NOT NULL,
+ email_sent boolean NOT NULL,
+ renewal_token text,
+ token_used_ts_ms bigint
+);
+CREATE TABLE application_services_state (
+ as_id text NOT NULL,
+ state character varying(5),
+ read_receipt_stream_id bigint,
+ presence_stream_id bigint,
+ to_device_stream_id bigint,
+ device_list_stream_id bigint
+);
+CREATE SEQUENCE application_services_txn_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE application_services_txns (
+ as_id text NOT NULL,
+ txn_id bigint NOT NULL,
+ event_ids text NOT NULL
+);
+CREATE TABLE appservice_room_list (
+ appservice_id text NOT NULL,
+ network_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE appservice_stream_position (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_ordering bigint,
+ CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE batch_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ batch_id text NOT NULL
+);
+CREATE TABLE blocked_rooms (
+ room_id text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE cache_invalidation_stream_by_instance (
+ stream_id bigint NOT NULL,
+ instance_name text NOT NULL,
+ cache_func text NOT NULL,
+ keys text[],
+ invalidation_ts bigint
+);
+CREATE SEQUENCE cache_invalidation_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE current_state_delta_stream (
+ stream_id bigint NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ event_id text,
+ prev_event_id text,
+ instance_name text
+);
+CREATE TABLE current_state_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ membership text
+);
+CREATE TABLE dehydrated_devices (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ device_data text NOT NULL
+);
+CREATE TABLE deleted_pushers (
+ stream_id bigint NOT NULL,
+ app_id text NOT NULL,
+ pushkey text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE destination_rooms (
+ destination text NOT NULL,
+ room_id text NOT NULL,
+ stream_ordering bigint NOT NULL
+);
+CREATE TABLE destinations (
+ destination text NOT NULL,
+ retry_last_ts bigint,
+ retry_interval bigint,
+ failure_ts bigint,
+ last_successful_stream_ordering bigint
+);
+CREATE TABLE device_auth_providers (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ auth_provider_id text NOT NULL,
+ auth_provider_session_id text NOT NULL
+);
+CREATE TABLE device_federation_inbox (
+ origin text NOT NULL,
+ message_id text NOT NULL,
+ received_ts bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE device_federation_outbox (
+ destination text NOT NULL,
+ stream_id bigint NOT NULL,
+ queued_ts bigint NOT NULL,
+ messages_json text NOT NULL,
+ instance_name text
+);
+CREATE TABLE device_inbox (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ message_json text NOT NULL,
+ instance_name text
+);
+CREATE SEQUENCE device_inbox_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE device_lists_changes_in_room (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ room_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ converted_to_destinations boolean NOT NULL,
+ opentracing_context text
+);
+CREATE TABLE device_lists_outbound_last_success (
+ destination text NOT NULL,
+ user_id text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE device_lists_outbound_pokes (
+ destination text NOT NULL,
+ stream_id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ sent boolean NOT NULL,
+ ts bigint NOT NULL,
+ opentracing_context text
+);
+CREATE TABLE device_lists_remote_cache (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ content text NOT NULL
+);
+CREATE TABLE device_lists_remote_extremeties (
+ user_id text NOT NULL,
+ stream_id text NOT NULL
+);
+CREATE TABLE device_lists_remote_resync (
+ user_id text NOT NULL,
+ added_ts bigint NOT NULL
+);
+CREATE TABLE device_lists_stream (
+ stream_id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL
+);
+CREATE TABLE devices (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ display_name text,
+ last_seen bigint,
+ ip text,
+ user_agent text,
+ hidden boolean DEFAULT false
+);
+CREATE TABLE e2e_cross_signing_keys (
+ user_id text NOT NULL,
+ keytype text NOT NULL,
+ keydata text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE e2e_cross_signing_signatures (
+ user_id text NOT NULL,
+ key_id text NOT NULL,
+ target_user_id text NOT NULL,
+ target_device_id text NOT NULL,
+ signature text NOT NULL
+);
+CREATE TABLE e2e_device_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ key_json text NOT NULL
+);
+CREATE TABLE e2e_fallback_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ algorithm text NOT NULL,
+ key_id text NOT NULL,
+ key_json text NOT NULL,
+ used boolean DEFAULT false NOT NULL
+);
+CREATE TABLE e2e_one_time_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ algorithm text NOT NULL,
+ key_id text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ key_json text NOT NULL
+);
+CREATE TABLE e2e_room_keys (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ session_id text NOT NULL,
+ version bigint NOT NULL,
+ first_message_index integer,
+ forwarded_count integer,
+ is_verified boolean,
+ session_data text NOT NULL
+);
+CREATE TABLE e2e_room_keys_versions (
+ user_id text NOT NULL,
+ version bigint NOT NULL,
+ algorithm text NOT NULL,
+ auth_data text NOT NULL,
+ deleted smallint DEFAULT 0 NOT NULL,
+ etag bigint
+);
+CREATE TABLE erased_users (
+ user_id text NOT NULL
+);
+CREATE TABLE event_auth (
+ event_id text NOT NULL,
+ auth_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE SEQUENCE event_auth_chain_id
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE event_auth_chain_links (
+ origin_chain_id bigint NOT NULL,
+ origin_sequence_number bigint NOT NULL,
+ target_chain_id bigint NOT NULL,
+ target_sequence_number bigint NOT NULL
+);
+CREATE TABLE event_auth_chain_to_calculate (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL
+);
+CREATE TABLE event_auth_chains (
+ event_id text NOT NULL,
+ chain_id bigint NOT NULL,
+ sequence_number bigint NOT NULL
+);
+CREATE TABLE event_backward_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE event_edges (
+ event_id text NOT NULL,
+ prev_event_id text NOT NULL,
+ room_id text,
+ is_state boolean DEFAULT false NOT NULL
+);
+CREATE TABLE event_expiry (
+ event_id text NOT NULL,
+ expiry_ts bigint NOT NULL
+);
+CREATE TABLE event_forward_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE event_json (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ internal_metadata text NOT NULL,
+ json text NOT NULL,
+ format_version integer
+);
+CREATE TABLE event_labels (
+ event_id text NOT NULL,
+ label text NOT NULL,
+ room_id text NOT NULL,
+ topological_ordering bigint NOT NULL
+);
+CREATE TABLE event_push_actions (
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ profile_tag character varying(32),
+ actions text NOT NULL,
+ topological_ordering bigint,
+ stream_ordering bigint,
+ notif smallint,
+ highlight smallint,
+ unread smallint,
+ thread_id text
+);
+CREATE TABLE event_push_actions_staging (
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ actions text NOT NULL,
+ notif smallint NOT NULL,
+ highlight smallint NOT NULL,
+ unread smallint,
+ thread_id text
+);
+CREATE TABLE event_push_summary (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ notif_count bigint NOT NULL,
+ stream_ordering bigint NOT NULL,
+ unread_count bigint,
+ last_receipt_stream_ordering bigint,
+ thread_id text
+);
+CREATE TABLE event_push_summary_last_receipt_stream_id (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint NOT NULL,
+ CONSTRAINT event_push_summary_last_receipt_stream_id_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE event_push_summary_stream_ordering (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_ordering bigint NOT NULL,
+ CONSTRAINT event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE event_relations (
+ event_id text NOT NULL,
+ relates_to_id text NOT NULL,
+ relation_type text NOT NULL,
+ aggregation_key text
+);
+CREATE TABLE event_reports (
+ id bigint NOT NULL,
+ received_ts bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ reason text,
+ content text
+);
+CREATE TABLE event_search (
+ event_id text,
+ room_id text,
+ sender text,
+ key text,
+ vector tsvector,
+ origin_server_ts bigint,
+ stream_ordering bigint
+);
+CREATE TABLE event_to_state_groups (
+ event_id text NOT NULL,
+ state_group bigint NOT NULL
+);
+CREATE TABLE event_txn_id (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ user_id text NOT NULL,
+ token_id bigint NOT NULL,
+ txn_id text NOT NULL,
+ inserted_ts bigint NOT NULL
+);
+CREATE TABLE events (
+ topological_ordering bigint NOT NULL,
+ event_id text NOT NULL,
+ type text NOT NULL,
+ room_id text NOT NULL,
+ content text,
+ unrecognized_keys text,
+ processed boolean NOT NULL,
+ outlier boolean NOT NULL,
+ depth bigint DEFAULT 0 NOT NULL,
+ origin_server_ts bigint,
+ received_ts bigint,
+ sender text,
+ contains_url boolean,
+ instance_name text,
+ stream_ordering bigint,
+ state_key text,
+ rejection_reason text
+);
+CREATE SEQUENCE events_backfill_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE SEQUENCE events_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE ex_outlier_stream (
+ event_stream_ordering bigint NOT NULL,
+ event_id text NOT NULL,
+ state_group bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE federation_inbound_events_staging (
+ origin text NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ received_ts bigint NOT NULL,
+ event_json text NOT NULL,
+ internal_metadata text NOT NULL
+);
+CREATE TABLE federation_stream_position (
+ type text NOT NULL,
+ stream_id bigint NOT NULL,
+ instance_name text DEFAULT 'master'::text NOT NULL
+);
+CREATE TABLE ignored_users (
+ ignorer_user_id text NOT NULL,
+ ignored_user_id text NOT NULL
+);
+CREATE TABLE insertion_event_edges (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ insertion_prev_event_id text NOT NULL
+);
+CREATE TABLE insertion_event_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE insertion_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ next_batch_id text NOT NULL
+);
+CREATE TABLE instance_map (
+ instance_id integer NOT NULL,
+ instance_name text NOT NULL
+);
+CREATE SEQUENCE instance_map_instance_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+ALTER SEQUENCE instance_map_instance_id_seq OWNED BY instance_map.instance_id;
+CREATE TABLE local_current_membership (
+ room_id text NOT NULL,
+ user_id text NOT NULL,
+ event_id text NOT NULL,
+ membership text NOT NULL
+);
+CREATE TABLE local_media_repository (
+ media_id text,
+ media_type text,
+ media_length integer,
+ created_ts bigint,
+ upload_name text,
+ user_id text,
+ quarantined_by text,
+ url_cache text,
+ last_access_ts bigint,
+ safe_from_quarantine boolean DEFAULT false NOT NULL
+);
+CREATE TABLE local_media_repository_thumbnails (
+ media_id text,
+ thumbnail_width integer,
+ thumbnail_height integer,
+ thumbnail_type text,
+ thumbnail_method text,
+ thumbnail_length integer
+);
+CREATE TABLE local_media_repository_url_cache (
+ url text,
+ response_code integer,
+ etag text,
+ expires_ts bigint,
+ og text,
+ media_id text,
+ download_ts bigint
+);
+CREATE TABLE monthly_active_users (
+ user_id text NOT NULL,
+ "timestamp" bigint NOT NULL
+);
+CREATE TABLE open_id_tokens (
+ token text NOT NULL,
+ ts_valid_until_ms bigint NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE partial_state_events (
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE partial_state_rooms (
+ room_id text NOT NULL
+);
+CREATE TABLE partial_state_rooms_servers (
+ room_id text NOT NULL,
+ server_name text NOT NULL
+);
+CREATE TABLE presence (
+ user_id text NOT NULL,
+ state character varying(20),
+ status_msg text,
+ mtime bigint
+);
+CREATE TABLE presence_stream (
+ stream_id bigint,
+ user_id text,
+ state text,
+ last_active_ts bigint,
+ last_federation_update_ts bigint,
+ last_user_sync_ts bigint,
+ status_msg text,
+ currently_active boolean,
+ instance_name text
+);
+CREATE SEQUENCE presence_stream_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE profiles (
+ user_id text NOT NULL,
+ displayname text,
+ avatar_url text
+);
+CREATE TABLE push_rules (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ rule_id text NOT NULL,
+ priority_class smallint NOT NULL,
+ priority integer DEFAULT 0 NOT NULL,
+ conditions text NOT NULL,
+ actions text NOT NULL
+);
+CREATE TABLE push_rules_enable (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ rule_id text NOT NULL,
+ enabled smallint
+);
+CREATE TABLE push_rules_stream (
+ stream_id bigint NOT NULL,
+ event_stream_ordering bigint NOT NULL,
+ user_id text NOT NULL,
+ rule_id text NOT NULL,
+ op text NOT NULL,
+ priority_class smallint,
+ priority integer,
+ conditions text,
+ actions text
+);
+CREATE TABLE pusher_throttle (
+ pusher bigint NOT NULL,
+ room_id text NOT NULL,
+ last_sent_ts bigint,
+ throttle_ms bigint
+);
+CREATE TABLE pushers (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ access_token bigint,
+ profile_tag text NOT NULL,
+ kind text NOT NULL,
+ app_id text NOT NULL,
+ app_display_name text NOT NULL,
+ device_display_name text NOT NULL,
+ pushkey text NOT NULL,
+ ts bigint NOT NULL,
+ lang text,
+ data text,
+ last_stream_ordering bigint,
+ last_success bigint,
+ failing_since bigint
+);
+CREATE TABLE ratelimit_override (
+ user_id text NOT NULL,
+ messages_per_second bigint,
+ burst_count bigint
+);
+CREATE TABLE receipts_graph (
+ room_id text NOT NULL,
+ receipt_type text NOT NULL,
+ user_id text NOT NULL,
+ event_ids text NOT NULL,
+ data text NOT NULL,
+ thread_id text
+);
+CREATE TABLE receipts_linearized (
+ stream_id bigint NOT NULL,
+ room_id text NOT NULL,
+ receipt_type text NOT NULL,
+ user_id text NOT NULL,
+ event_id text NOT NULL,
+ data text NOT NULL,
+ instance_name text,
+ event_stream_ordering bigint,
+ thread_id text
+);
+CREATE SEQUENCE receipts_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE received_transactions (
+ transaction_id text,
+ origin text,
+ ts bigint,
+ response_code integer,
+ response_json bytea,
+ has_been_referenced smallint DEFAULT 0
+);
+CREATE TABLE redactions (
+ event_id text NOT NULL,
+ redacts text NOT NULL,
+ have_censored boolean DEFAULT false NOT NULL,
+ received_ts bigint
+);
+CREATE TABLE refresh_tokens (
+ id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ token text NOT NULL,
+ next_token_id bigint,
+ expiry_ts bigint,
+ ultimate_session_expiry_ts bigint
+);
+CREATE TABLE registration_tokens (
+ token text NOT NULL,
+ uses_allowed integer,
+ pending integer NOT NULL,
+ completed integer NOT NULL,
+ expiry_time bigint
+);
+CREATE TABLE rejections (
+ event_id text NOT NULL,
+ reason text NOT NULL,
+ last_check text NOT NULL
+);
+CREATE TABLE remote_media_cache (
+ media_origin text,
+ media_id text,
+ media_type text,
+ created_ts bigint,
+ upload_name text,
+ media_length integer,
+ filesystem_id text,
+ last_access_ts bigint,
+ quarantined_by text
+);
+CREATE TABLE remote_media_cache_thumbnails (
+ media_origin text,
+ media_id text,
+ thumbnail_width integer,
+ thumbnail_height integer,
+ thumbnail_method text,
+ thumbnail_type text,
+ thumbnail_length integer,
+ filesystem_id text
+);
+CREATE TABLE room_account_data (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ account_data_type text NOT NULL,
+ stream_id bigint NOT NULL,
+ content text NOT NULL,
+ instance_name text
+);
+CREATE TABLE room_alias_servers (
+ room_alias text NOT NULL,
+ server text NOT NULL
+);
+CREATE TABLE room_aliases (
+ room_alias text NOT NULL,
+ room_id text NOT NULL,
+ creator text
+);
+CREATE TABLE room_depth (
+ room_id text NOT NULL,
+ min_depth bigint
+);
+CREATE TABLE room_memberships (
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ sender text NOT NULL,
+ room_id text NOT NULL,
+ membership text NOT NULL,
+ forgotten integer DEFAULT 0,
+ display_name text,
+ avatar_url text
+);
+CREATE TABLE room_retention (
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ min_lifetime bigint,
+ max_lifetime bigint
+);
+CREATE TABLE room_stats_current (
+ room_id text NOT NULL,
+ current_state_events integer NOT NULL,
+ joined_members integer NOT NULL,
+ invited_members integer NOT NULL,
+ left_members integer NOT NULL,
+ banned_members integer NOT NULL,
+ local_users_in_room integer NOT NULL,
+ completed_delta_stream_id bigint NOT NULL,
+ knocked_members integer
+);
+CREATE TABLE room_stats_earliest_token (
+ room_id text NOT NULL,
+ token bigint NOT NULL
+);
+CREATE TABLE room_stats_state (
+ room_id text NOT NULL,
+ name text,
+ canonical_alias text,
+ join_rules text,
+ history_visibility text,
+ encryption text,
+ avatar text,
+ guest_access text,
+ is_federatable boolean,
+ topic text,
+ room_type text
+);
+CREATE TABLE room_tags (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ tag text NOT NULL,
+ content text NOT NULL
+);
+CREATE TABLE room_tags_revisions (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE rooms (
+ room_id text NOT NULL,
+ is_public boolean,
+ creator text,
+ room_version text,
+ has_auth_chain_index boolean
+);
+CREATE TABLE server_keys_json (
+ server_name text NOT NULL,
+ key_id text NOT NULL,
+ from_server text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ ts_valid_until_ms bigint NOT NULL,
+ key_json bytea NOT NULL
+);
+CREATE TABLE server_signature_keys (
+ server_name text,
+ key_id text,
+ from_server text,
+ ts_added_ms bigint,
+ verify_key bytea,
+ ts_valid_until_ms bigint
+);
+CREATE TABLE sessions (
+ session_type text NOT NULL,
+ session_id text NOT NULL,
+ value text NOT NULL,
+ expiry_time_ms bigint NOT NULL
+);
+CREATE TABLE state_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ prev_state text
+);
+CREATE TABLE stats_incremental_position (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint NOT NULL,
+ CONSTRAINT stats_incremental_position_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE stream_ordering_to_exterm (
+ stream_ordering bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE stream_positions (
+ stream_name text NOT NULL,
+ instance_name text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE threepid_guest_access_tokens (
+ medium text,
+ address text,
+ guest_access_token text,
+ first_inviter text
+);
+CREATE TABLE threepid_validation_session (
+ session_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ client_secret text NOT NULL,
+ last_send_attempt bigint NOT NULL,
+ validated_at bigint
+);
+CREATE TABLE threepid_validation_token (
+ token text NOT NULL,
+ session_id text NOT NULL,
+ next_link text,
+ expires bigint NOT NULL
+);
+CREATE TABLE ui_auth_sessions (
+ session_id text NOT NULL,
+ creation_time bigint NOT NULL,
+ serverdict text NOT NULL,
+ clientdict text NOT NULL,
+ uri text NOT NULL,
+ method text NOT NULL,
+ description text NOT NULL
+);
+CREATE TABLE ui_auth_sessions_credentials (
+ session_id text NOT NULL,
+ stage_type text NOT NULL,
+ result text NOT NULL
+);
+CREATE TABLE ui_auth_sessions_ips (
+ session_id text NOT NULL,
+ ip text NOT NULL,
+ user_agent text NOT NULL
+);
+CREATE TABLE user_daily_visits (
+ user_id text NOT NULL,
+ device_id text,
+ "timestamp" bigint NOT NULL,
+ user_agent text
+);
+CREATE TABLE user_directory (
+ user_id text NOT NULL,
+ room_id text,
+ display_name text,
+ avatar_url text
+);
+CREATE TABLE user_directory_search (
+ user_id text NOT NULL,
+ vector tsvector
+);
+CREATE TABLE user_directory_stream_pos (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint,
+ CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE user_external_ids (
+ auth_provider text NOT NULL,
+ external_id text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE user_filters (
+ user_id text NOT NULL,
+ filter_id bigint NOT NULL,
+ filter_json bytea NOT NULL
+);
+CREATE SEQUENCE user_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE user_ips (
+ user_id text NOT NULL,
+ access_token text NOT NULL,
+ device_id text,
+ ip text NOT NULL,
+ user_agent text NOT NULL,
+ last_seen bigint NOT NULL
+);
+CREATE TABLE user_signature_stream (
+ stream_id bigint NOT NULL,
+ from_user_id text NOT NULL,
+ user_ids text NOT NULL
+);
+CREATE TABLE user_stats_current (
+ user_id text NOT NULL,
+ joined_rooms bigint NOT NULL,
+ completed_delta_stream_id bigint NOT NULL
+);
+CREATE TABLE user_threepid_id_server (
+ user_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ id_server text NOT NULL
+);
+CREATE TABLE user_threepids (
+ user_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ validated_at bigint NOT NULL,
+ added_at bigint NOT NULL
+);
+CREATE TABLE users (
+ name text,
+ password_hash text,
+ creation_ts bigint,
+ admin smallint DEFAULT 0 NOT NULL,
+ upgrade_ts bigint,
+ is_guest smallint DEFAULT 0 NOT NULL,
+ appservice_id text,
+ consent_version text,
+ consent_server_notice_sent text,
+ user_type text,
+ deactivated smallint DEFAULT 0 NOT NULL,
+ shadow_banned boolean,
+ consent_ts bigint
+);
+CREATE TABLE users_in_public_rooms (
+ user_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE users_pending_deactivation (
+ user_id text NOT NULL
+);
+CREATE TABLE users_to_send_full_presence_to (
+ user_id text NOT NULL,
+ presence_stream_id bigint
+);
+CREATE TABLE users_who_share_private_rooms (
+ user_id text NOT NULL,
+ other_user_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE worker_locks (
+ lock_name text NOT NULL,
+ lock_key text NOT NULL,
+ instance_name text NOT NULL,
+ token text NOT NULL,
+ last_renewed_ts bigint NOT NULL
+);
+ALTER TABLE ONLY instance_map ALTER COLUMN instance_id SET DEFAULT nextval('instance_map_instance_id_seq'::regclass);
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY account_data
+ ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type);
+ALTER TABLE ONLY account_validity
+ ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY application_services_state
+ ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id);
+ALTER TABLE ONLY application_services_txns
+ ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id);
+ALTER TABLE ONLY appservice_stream_position
+ ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock);
+ALTER TABLE ONLY current_state_events
+ ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY current_state_events
+ ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key);
+ALTER TABLE ONLY dehydrated_devices
+ ADD CONSTRAINT dehydrated_devices_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_pkey PRIMARY KEY (destination, room_id);
+ALTER TABLE ONLY destinations
+ ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination);
+ALTER TABLE ONLY devices
+ ADD CONSTRAINT device_uniqueness UNIQUE (user_id, device_id);
+ALTER TABLE ONLY e2e_device_keys_json
+ ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id);
+ALTER TABLE ONLY e2e_fallback_keys_json
+ ADD CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm);
+ALTER TABLE ONLY e2e_one_time_keys_json
+ ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id);
+ALTER TABLE ONLY event_auth_chain_to_calculate
+ ADD CONSTRAINT event_auth_chain_to_calculate_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_auth_chains
+ ADD CONSTRAINT event_auth_chains_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_backward_extremities
+ ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+ALTER TABLE ONLY event_expiry
+ ADD CONSTRAINT event_expiry_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_forward_extremities
+ ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+ALTER TABLE ONLY event_push_actions
+ ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag);
+ALTER TABLE ONLY event_json
+ ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY event_labels
+ ADD CONSTRAINT event_labels_pkey PRIMARY KEY (event_id, label);
+ALTER TABLE ONLY event_push_summary_last_receipt_stream_id
+ ADD CONSTRAINT event_push_summary_last_receipt_stream_id_lock_key UNIQUE (lock);
+ALTER TABLE ONLY event_push_summary_stream_ordering
+ ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock);
+ALTER TABLE ONLY event_reports
+ ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY event_to_state_groups
+ ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY events
+ ADD CONSTRAINT events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY ex_outlier_stream
+ ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering);
+ALTER TABLE ONLY instance_map
+ ADD CONSTRAINT instance_map_pkey PRIMARY KEY (instance_id);
+ALTER TABLE ONLY local_media_repository
+ ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id);
+ALTER TABLE ONLY user_threepids
+ ADD CONSTRAINT medium_address UNIQUE (medium, address);
+ALTER TABLE ONLY open_id_tokens
+ ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token);
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY partial_state_rooms
+ ADD CONSTRAINT partial_state_rooms_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY partial_state_rooms_servers
+ ADD CONSTRAINT partial_state_rooms_servers_room_id_server_name_key UNIQUE (room_id, server_name);
+ALTER TABLE ONLY presence
+ ADD CONSTRAINT presence_user_id_key UNIQUE (user_id);
+ALTER TABLE ONLY profiles
+ ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id);
+ALTER TABLE ONLY push_rules_enable
+ ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY push_rules_enable
+ ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id);
+ALTER TABLE ONLY push_rules
+ ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY push_rules
+ ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id);
+ALTER TABLE ONLY pusher_throttle
+ ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id);
+ALTER TABLE ONLY pushers
+ ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name);
+ALTER TABLE ONLY pushers
+ ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY receipts_graph
+ ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id);
+ALTER TABLE ONLY receipts_graph
+ ADD CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id);
+ALTER TABLE ONLY receipts_linearized
+ ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id);
+ALTER TABLE ONLY receipts_linearized
+ ADD CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id);
+ALTER TABLE ONLY received_transactions
+ ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin);
+ALTER TABLE ONLY redactions
+ ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY registration_tokens
+ ADD CONSTRAINT registration_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY rejections
+ ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY remote_media_cache
+ ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id);
+ALTER TABLE ONLY room_account_data
+ ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type);
+ALTER TABLE ONLY room_aliases
+ ADD CONSTRAINT room_aliases_room_alias_key UNIQUE (room_alias);
+ALTER TABLE ONLY room_depth
+ ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id);
+ALTER TABLE ONLY room_memberships
+ ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY room_retention
+ ADD CONSTRAINT room_retention_pkey PRIMARY KEY (room_id, event_id);
+ALTER TABLE ONLY room_stats_current
+ ADD CONSTRAINT room_stats_current_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY room_tags_revisions
+ ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id);
+ALTER TABLE ONLY room_tags
+ ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag);
+ALTER TABLE ONLY rooms
+ ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY server_keys_json
+ ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server);
+ALTER TABLE ONLY server_signature_keys
+ ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id);
+ALTER TABLE ONLY sessions
+ ADD CONSTRAINT sessions_session_type_session_id_key UNIQUE (session_type, session_id);
+ALTER TABLE ONLY state_events
+ ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY stats_incremental_position
+ ADD CONSTRAINT stats_incremental_position_lock_key UNIQUE (lock);
+ALTER TABLE ONLY threepid_validation_session
+ ADD CONSTRAINT threepid_validation_session_pkey PRIMARY KEY (session_id);
+ALTER TABLE ONLY threepid_validation_token
+ ADD CONSTRAINT threepid_validation_token_pkey PRIMARY KEY (token);
+ALTER TABLE ONLY ui_auth_sessions_credentials
+ ADD CONSTRAINT ui_auth_sessions_credentials_session_id_stage_type_key UNIQUE (session_id, stage_type);
+ALTER TABLE ONLY ui_auth_sessions_ips
+ ADD CONSTRAINT ui_auth_sessions_ips_session_id_ip_user_agent_key UNIQUE (session_id, ip, user_agent);
+ALTER TABLE ONLY ui_auth_sessions
+ ADD CONSTRAINT ui_auth_sessions_session_id_key UNIQUE (session_id);
+ALTER TABLE ONLY user_directory_stream_pos
+ ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock);
+ALTER TABLE ONLY user_external_ids
+ ADD CONSTRAINT user_external_ids_auth_provider_external_id_key UNIQUE (auth_provider, external_id);
+ALTER TABLE ONLY user_stats_current
+ ADD CONSTRAINT user_stats_current_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY users
+ ADD CONSTRAINT users_name_key UNIQUE (name);
+ALTER TABLE ONLY users_to_send_full_presence_to
+ ADD CONSTRAINT users_to_send_full_presence_to_pkey PRIMARY KEY (user_id);
+CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id);
+CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id);
+CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id);
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id);
+CREATE INDEX batch_events_batch_id ON batch_events USING btree (batch_id);
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id);
+CREATE UNIQUE INDEX cache_invalidation_stream_by_instance_id ON cache_invalidation_stream_by_instance USING btree (stream_id);
+CREATE INDEX cache_invalidation_stream_by_instance_instance_index ON cache_invalidation_stream_by_instance USING btree (instance_name, stream_id);
+CREATE UNIQUE INDEX chunk_events_event_id ON batch_events USING btree (event_id);
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id);
+CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text);
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id);
+CREATE INDEX destination_rooms_room_id ON destination_rooms USING btree (room_id);
+CREATE INDEX device_auth_providers_devices ON device_auth_providers USING btree (user_id, device_id);
+CREATE INDEX device_auth_providers_sessions ON device_auth_providers USING btree (auth_provider_id, auth_provider_session_id);
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id);
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id);
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id);
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id);
+CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id);
+CREATE UNIQUE INDEX device_lists_changes_in_stream_id ON device_lists_changes_in_room USING btree (stream_id, room_id);
+CREATE INDEX device_lists_changes_in_stream_id_unconverted ON device_lists_changes_in_room USING btree (stream_id) WHERE (NOT converted_to_destinations);
+CREATE UNIQUE INDEX device_lists_outbound_last_success_unique_idx ON device_lists_outbound_last_success USING btree (destination, user_id);
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id);
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id);
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id);
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id);
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id);
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync USING btree (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync USING btree (added_ts);
+CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id);
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING btree (user_id, device_id);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_idx ON e2e_cross_signing_keys USING btree (user_id, keytype, stream_id);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_stream_idx ON e2e_cross_signing_keys USING btree (stream_id);
+CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures USING btree (user_id, target_user_id, target_device_id);
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version);
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys USING btree (user_id, version, room_id, session_id);
+CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id);
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id);
+CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id);
+CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id);
+CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id);
+CREATE INDEX event_auth_chain_links_idx ON event_auth_chain_links USING btree (origin_chain_id, target_chain_id);
+CREATE INDEX event_auth_chain_to_calculate_rm_id ON event_auth_chain_to_calculate USING btree (room_id);
+CREATE UNIQUE INDEX event_auth_chains_c_seq_index ON event_auth_chains USING btree (chain_id, sequence_number);
+CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false));
+CREATE UNIQUE INDEX event_edges_event_id_prev_event_id_idx ON event_edges USING btree (event_id, prev_event_id);
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry USING btree (expiry_ts);
+CREATE INDEX event_labels_room_id_label_idx ON event_labels USING btree (room_id, label, topological_ordering);
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1);
+CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering);
+CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id);
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id);
+CREATE INDEX event_push_actions_stream_highlight_index ON event_push_actions USING btree (highlight, stream_ordering) WHERE (highlight = 0);
+CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id);
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering);
+CREATE UNIQUE INDEX event_push_summary_unique_index ON event_push_summary USING btree (user_id, room_id);
+CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary USING btree (user_id, room_id, thread_id);
+CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id);
+CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key);
+CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id);
+CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id);
+CREATE INDEX event_search_fts_idx ON event_search USING gin (vector);
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group);
+CREATE UNIQUE INDEX event_txn_id_event_id ON event_txn_id USING btree (event_id);
+CREATE INDEX event_txn_id_ts ON event_txn_id USING btree (inserted_ts);
+CREATE UNIQUE INDEX event_txn_id_txn_id ON event_txn_id USING btree (room_id, user_id, token_id, txn_id);
+CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering);
+CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering);
+CREATE UNIQUE INDEX events_stream_ordering ON events USING btree (stream_ordering);
+CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering);
+CREATE UNIQUE INDEX federation_inbound_events_staging_instance_event ON federation_inbound_events_staging USING btree (origin, event_id);
+CREATE INDEX federation_inbound_events_staging_room ON federation_inbound_events_staging USING btree (room_id, received_ts);
+CREATE UNIQUE INDEX federation_stream_position_instance ON federation_stream_position USING btree (type, instance_name);
+CREATE INDEX ignored_users_ignored_user_id ON ignored_users USING btree (ignored_user_id);
+CREATE UNIQUE INDEX ignored_users_uniqueness ON ignored_users USING btree (ignorer_user_id, ignored_user_id);
+CREATE INDEX insertion_event_edges_event_id ON insertion_event_edges USING btree (event_id);
+CREATE INDEX insertion_event_edges_insertion_prev_event_id ON insertion_event_edges USING btree (insertion_prev_event_id);
+CREATE INDEX insertion_event_edges_insertion_room_id ON insertion_event_edges USING btree (room_id);
+CREATE UNIQUE INDEX insertion_event_extremities_event_id ON insertion_event_extremities USING btree (event_id);
+CREATE INDEX insertion_event_extremities_room_id ON insertion_event_extremities USING btree (room_id);
+CREATE UNIQUE INDEX insertion_events_event_id ON insertion_events USING btree (event_id);
+CREATE INDEX insertion_events_next_batch_id ON insertion_events USING btree (next_batch_id);
+CREATE UNIQUE INDEX instance_map_idx ON instance_map USING btree (instance_name);
+CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership USING btree (user_id, room_id);
+CREATE INDEX local_current_membership_room_idx ON local_current_membership USING btree (room_id);
+CREATE UNIQUE INDEX local_media_repository_thumbn_media_id_width_height_method_key ON local_media_repository_thumbnails USING btree (media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method);
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts);
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id);
+CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp");
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id);
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms);
+CREATE INDEX partial_state_events_room_id_idx ON partial_state_events USING btree (room_id);
+CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id);
+CREATE INDEX presence_stream_state_not_offline_idx ON presence_stream USING btree (state) WHERE (state <> 'offline'::text);
+CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+CREATE INDEX public_room_index ON rooms USING btree (is_public);
+CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name);
+CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id);
+CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id);
+CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name);
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id);
+CREATE UNIQUE INDEX receipts_graph_unique_index ON receipts_graph USING btree (room_id, receipt_type, user_id) WHERE (thread_id IS NULL);
+CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id);
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id);
+CREATE UNIQUE INDEX receipts_linearized_unique_index ON receipts_linearized USING btree (room_id, receipt_type, user_id) WHERE (thread_id IS NULL);
+CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id);
+CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts);
+CREATE INDEX redactions_have_censored_ts ON redactions USING btree (received_ts) WHERE (NOT have_censored);
+CREATE INDEX redactions_redacts ON redactions USING btree (redacts);
+CREATE INDEX refresh_tokens_next_token_id ON refresh_tokens USING btree (next_token_id) WHERE (next_token_id IS NOT NULL);
+CREATE UNIQUE INDEX remote_media_repository_thumbn_media_origin_id_width_height_met ON remote_media_cache_thumbnails USING btree (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method);
+CREATE INDEX room_account_data_stream_id ON room_account_data USING btree (user_id, stream_id);
+CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias);
+CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id);
+CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id);
+CREATE INDEX room_memberships_user_room_forgotten ON room_memberships USING btree (user_id, room_id) WHERE (forgotten = 1);
+CREATE INDEX room_retention_max_lifetime_idx ON room_retention USING btree (max_lifetime);
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id);
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state USING btree (room_id);
+CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering);
+CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering);
+CREATE UNIQUE INDEX stream_positions_idx ON stream_positions USING btree (stream_name, instance_name);
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address);
+CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token USING btree (session_id);
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp");
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp");
+CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id);
+CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector);
+CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id);
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id);
+CREATE INDEX user_external_ids_user_id_idx ON user_external_ids USING btree (user_id);
+CREATE UNIQUE INDEX user_filters_unique ON user_filters USING btree (user_id, filter_id);
+CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen);
+CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen);
+CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen);
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip);
+CREATE UNIQUE INDEX user_signature_stream_idx ON user_signature_stream USING btree (stream_id);
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server);
+CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address);
+CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id);
+CREATE INDEX users_creation_ts ON users USING btree (creation_ts);
+CREATE INDEX users_have_local_media ON local_media_repository USING btree (user_id, created_ts);
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms USING btree (room_id);
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id);
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id);
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id);
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id);
+CREATE UNIQUE INDEX worker_locks_key ON worker_locks USING btree (lock_name, lock_key);
+CREATE TRIGGER check_partial_state_events BEFORE INSERT OR UPDATE ON partial_state_events FOR EACH ROW EXECUTE PROCEDURE check_partial_state_events();
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_refresh_token_id_fkey FOREIGN KEY (refresh_token_id) REFERENCES refresh_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_destination_fkey FOREIGN KEY (destination) REFERENCES destinations(destination);
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_room_id_fkey FOREIGN KEY (room_id) REFERENCES rooms(room_id);
+ALTER TABLE ONLY event_edges
+ ADD CONSTRAINT event_edges_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id);
+ALTER TABLE ONLY event_txn_id
+ ADD CONSTRAINT event_txn_id_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id) ON DELETE CASCADE;
+ALTER TABLE ONLY event_txn_id
+ ADD CONSTRAINT event_txn_id_token_id_fkey FOREIGN KEY (token_id) REFERENCES access_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id);
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_room_id_fkey FOREIGN KEY (room_id) REFERENCES partial_state_rooms(room_id);
+ALTER TABLE ONLY partial_state_rooms
+ ADD CONSTRAINT partial_state_rooms_room_id_fkey FOREIGN KEY (room_id) REFERENCES rooms(room_id);
+ALTER TABLE ONLY partial_state_rooms_servers
+ ADD CONSTRAINT partial_state_rooms_servers_room_id_fkey FOREIGN KEY (room_id) REFERENCES partial_state_rooms(room_id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_next_token_id_fkey FOREIGN KEY (next_token_id) REFERENCES refresh_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY ui_auth_sessions_credentials
+ ADD CONSTRAINT ui_auth_sessions_credentials_session_id_fkey FOREIGN KEY (session_id) REFERENCES ui_auth_sessions(session_id);
+ALTER TABLE ONLY ui_auth_sessions_ips
+ ADD CONSTRAINT ui_auth_sessions_ips_session_id_fkey FOREIGN KEY (session_id) REFERENCES ui_auth_sessions(session_id);
+ALTER TABLE ONLY users_to_send_full_presence_to
+ ADD CONSTRAINT users_to_send_full_presence_to_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(name);
+INSERT INTO appservice_stream_position VALUES ('X', 0);
+INSERT INTO event_push_summary_last_receipt_stream_id VALUES ('X', 0);
+INSERT INTO event_push_summary_stream_ordering VALUES ('X', 0);
+INSERT INTO federation_stream_position VALUES ('federation', -1, 'master');
+INSERT INTO federation_stream_position VALUES ('events', -1, 'master');
+INSERT INTO stats_incremental_position VALUES ('X', 1);
+INSERT INTO user_directory_stream_pos VALUES ('X', 1);
+SELECT pg_catalog.setval('account_data_sequence', 1, true);
+SELECT pg_catalog.setval('application_services_txn_id_seq', 1, false);
+SELECT pg_catalog.setval('cache_invalidation_stream_seq', 1, true);
+SELECT pg_catalog.setval('device_inbox_sequence', 1, true);
+SELECT pg_catalog.setval('event_auth_chain_id', 1, false);
+SELECT pg_catalog.setval('events_backfill_stream_seq', 1, true);
+SELECT pg_catalog.setval('events_stream_seq', 1, true);
+SELECT pg_catalog.setval('instance_map_instance_id_seq', 1, false);
+SELECT pg_catalog.setval('presence_stream_sequence', 1, true);
+SELECT pg_catalog.setval('receipts_sequence', 1, true);
+SELECT pg_catalog.setval('user_id_seq', 1, false);
diff --git a/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..d403baf1fb
--- /dev/null
+++ b/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,646 @@
+CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) );
+CREATE INDEX application_services_txns_id ON application_services_txns ( as_id );
+CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) );
+CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, deactivated SMALLINT DEFAULT 0 NOT NULL, shadow_banned BOOLEAN, consent_ts bigint, UNIQUE(name) );
+CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
+CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
+CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
+CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER , failure_ts BIGINT, last_successful_stream_ordering BIGINT);
+CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, instance_name TEXT, state_key TEXT DEFAULT NULL, rejection_reason TEXT DEFAULT NULL, UNIQUE (event_id) );
+CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering );
+CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) );
+CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) );
+CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, membership TEXT, UNIQUE (event_id), UNIQUE (room_id, type, state_key) );
+CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) );
+CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
+CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT , room_version TEXT, has_auth_chain_index BOOLEAN);
+CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) );
+CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_user_name on push_rules (user_name);
+CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name);
+CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
+CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
+CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
+CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, safe_from_quarantine BOOLEAN NOT NULL DEFAULT 0, UNIQUE (media_id) );
+CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) );
+CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, have_censored BOOL NOT NULL DEFAULT false, received_ts BIGINT, UNIQUE (event_id) );
+CREATE INDEX redactions_redacts ON redactions (redacts);
+CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) );
+CREATE INDEX room_aliases_id ON room_aliases(room_id);
+CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL );
+CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias);
+CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) );
+CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) );
+CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) );
+CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) );
+CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
+CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value )
+/* event_search(event_id,room_id,sender,"key",value) */;
+CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) );
+CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, instance_name TEXT, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) );
+CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, instance_name TEXT, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) );
+CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, instance_name TEXT, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) );
+CREATE INDEX account_data_stream_id on account_data(user_id, stream_id);
+CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id);
+CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
+CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, unread SMALLINT, thread_id TEXT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) );
+CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id);
+CREATE INDEX events_room_stream on events(room_id, stream_ordering);
+CREATE INDEX public_room_index on rooms(is_public);
+CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering );
+CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN , instance_name TEXT);
+CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id);
+CREATE INDEX presence_stream_user_id ON presence_stream(user_id);
+CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT );
+CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id);
+CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id);
+CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL , instance_name TEXT);
+CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT );
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address);
+CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id );
+CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) );
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms);
+CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) );
+CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT );
+CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') );
+CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL , instance_name TEXT);
+CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id);
+CREATE INDEX received_transactions_ts ON received_transactions(ts);
+CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL , instance_name TEXT);
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id);
+CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL , instance_name TEXT);
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id);
+CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
+CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
+CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
+CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE INDEX evauth_edges_id ON event_auth(event_id);
+CREATE INDEX user_threepids_medium_address on user_threepids (medium, address);
+CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id );
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id);
+CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL , instance_name TEXT NOT NULL DEFAULT 'master');
+CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL );
+CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL );
+CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL );
+CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id);
+CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL , opentracing_context TEXT);
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id);
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id);
+CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL , unread_count BIGINT, last_receipt_stream_ordering BIGINT, thread_id TEXT);
+CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') );
+CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) );
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id);
+CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT );
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id);
+CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT , instance_name TEXT);
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id);
+CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
+CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value )
+/* user_directory_search(user_id,value) */;
+CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id);
+CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT );
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id);
+CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id);
+CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT );
+CREATE INDEX user_directory_room_idx ON user_directory(room_id);
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id);
+CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL , unread SMALLINT, thread_id TEXT);
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id);
+CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL );
+CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL , user_agent TEXT);
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
+CREATE TABLE erased_users ( user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id);
+CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL );
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL , etag BIGINT);
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL );
+CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);
+CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL );
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server );
+CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
+CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT , token_used_ts_ms BIGINT);
+CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT );
+CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id);
+CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key);
+CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL );
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id);
+CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen);
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering);
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id);
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id);
+CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen);
+CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
+CREATE INDEX users_creation_ts ON users (creation_ts);
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);
+CREATE TABLE threepid_validation_session (
+ session_id TEXT PRIMARY KEY,
+ medium TEXT NOT NULL,
+ address TEXT NOT NULL,
+ client_secret TEXT NOT NULL,
+ last_send_attempt BIGINT NOT NULL,
+ validated_at BIGINT
+);
+CREATE TABLE threepid_validation_token (
+ token TEXT PRIMARY KEY,
+ session_id TEXT NOT NULL,
+ next_link TEXT,
+ expires BIGINT NOT NULL
+);
+CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id);
+CREATE TABLE event_expiry (
+ event_id TEXT PRIMARY KEY,
+ expiry_ts BIGINT NOT NULL
+);
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry(expiry_ts);
+CREATE TABLE event_labels (
+ event_id TEXT,
+ label TEXT,
+ room_id TEXT NOT NULL,
+ topological_ordering BIGINT NOT NULL,
+ PRIMARY KEY(event_id, label)
+);
+CREATE INDEX event_labels_room_id_label_idx ON event_labels(room_id, label, topological_ordering);
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys(user_id, version, room_id, session_id);
+CREATE TABLE IF NOT EXISTS "devices" (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ display_name TEXT,
+ last_seen BIGINT,
+ ip TEXT,
+ user_agent TEXT,
+ hidden BOOLEAN DEFAULT 0,
+ CONSTRAINT device_uniqueness UNIQUE (user_id, device_id)
+);
+CREATE TABLE room_retention(
+ room_id TEXT,
+ event_id TEXT,
+ min_lifetime BIGINT,
+ max_lifetime BIGINT,
+
+ PRIMARY KEY(room_id, event_id)
+);
+CREATE INDEX room_retention_max_lifetime_idx on room_retention(max_lifetime);
+CREATE TABLE e2e_cross_signing_keys (
+ user_id TEXT NOT NULL,
+ -- the type of cross-signing key (master, user_signing, or self_signing)
+ keytype TEXT NOT NULL,
+ -- the full key information, as a json-encoded dict
+ keydata TEXT NOT NULL,
+ -- for keeping the keys in order, so that we can fetch the latest one
+ stream_id BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_idx ON e2e_cross_signing_keys(user_id, keytype, stream_id);
+CREATE TABLE e2e_cross_signing_signatures (
+ -- user who did the signing
+ user_id TEXT NOT NULL,
+ -- key used to sign
+ key_id TEXT NOT NULL,
+ -- user who was signed
+ target_user_id TEXT NOT NULL,
+ -- device/key that was signed
+ target_device_id TEXT NOT NULL,
+ -- the actual signature
+ signature TEXT NOT NULL
+);
+CREATE TABLE user_signature_stream (
+ -- uses the same stream ID as device list stream
+ stream_id BIGINT NOT NULL,
+ -- user who did the signing
+ from_user_id TEXT NOT NULL,
+ -- list of users who were signed, as a JSON array
+ user_ids TEXT NOT NULL
+);
+CREATE UNIQUE INDEX user_signature_stream_idx ON user_signature_stream(stream_id);
+CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
+CREATE TABLE stats_incremental_position (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+CREATE TABLE room_stats_current (
+ room_id TEXT NOT NULL PRIMARY KEY,
+
+ -- These are absolute counts
+ current_state_events INT NOT NULL,
+ joined_members INT NOT NULL,
+ invited_members INT NOT NULL,
+ left_members INT NOT NULL,
+ banned_members INT NOT NULL,
+
+ local_users_in_room INT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+, knocked_members INT);
+CREATE TABLE user_stats_current (
+ user_id TEXT NOT NULL PRIMARY KEY,
+
+ joined_rooms BIGINT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+CREATE TABLE room_stats_state (
+ room_id TEXT NOT NULL,
+ name TEXT,
+ canonical_alias TEXT,
+ join_rules TEXT,
+ history_visibility TEXT,
+ encryption TEXT,
+ avatar TEXT,
+ guest_access TEXT,
+ is_federatable BOOLEAN,
+ topic TEXT
+, room_type TEXT);
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state(room_id);
+CREATE TABLE IF NOT EXISTS "user_filters" ( user_id TEXT NOT NULL, filter_id BIGINT NOT NULL, filter_json BYTEA NOT NULL );
+CREATE UNIQUE INDEX user_filters_unique ON "user_filters" (user_id, filter_id);
+CREATE TABLE user_external_ids (
+ auth_provider TEXT NOT NULL,
+ external_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ UNIQUE (auth_provider, external_id)
+);
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id);
+CREATE TABLE device_lists_remote_resync (
+ user_id TEXT NOT NULL,
+ added_ts BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync (added_ts);
+CREATE TABLE local_current_membership (
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ membership TEXT NOT NULL
+ );
+CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id);
+CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id);
+CREATE TABLE ui_auth_sessions(
+ session_id TEXT NOT NULL, -- The session ID passed to the client.
+ creation_time BIGINT NOT NULL, -- The time this session was created (epoch time in milliseconds).
+ serverdict TEXT NOT NULL, -- A JSON dictionary of arbitrary data added by Synapse.
+ clientdict TEXT NOT NULL, -- A JSON dictionary of arbitrary data from the client.
+ uri TEXT NOT NULL, -- The URI the UI authentication session is using.
+ method TEXT NOT NULL, -- The HTTP method the UI authentication session is using.
+ -- The clientdict, uri, and method make up a tuple that must be immutable
+ -- throughout the lifetime of the UI Auth session.
+ description TEXT NOT NULL, -- A human readable description of the operation which caused the UI Auth flow to occur.
+ UNIQUE (session_id)
+);
+CREATE TABLE ui_auth_sessions_credentials(
+ session_id TEXT NOT NULL, -- The corresponding UI Auth session.
+ stage_type TEXT NOT NULL, -- The stage type.
+ result TEXT NOT NULL, -- The result of the stage verification, stored as JSON.
+ UNIQUE (session_id, stage_type),
+ FOREIGN KEY (session_id)
+ REFERENCES ui_auth_sessions (session_id)
+);
+CREATE TABLE IF NOT EXISTS "device_lists_outbound_last_success" ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL );
+CREATE UNIQUE INDEX device_lists_outbound_last_success_unique_idx ON "device_lists_outbound_last_success" (destination, user_id);
+CREATE TABLE IF NOT EXISTS "local_media_repository_thumbnails" ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) );
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id);
+CREATE TABLE IF NOT EXISTS "remote_media_cache_thumbnails" ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) );
+CREATE TABLE ui_auth_sessions_ips(
+ session_id TEXT NOT NULL,
+ ip TEXT NOT NULL,
+ user_agent TEXT NOT NULL,
+ UNIQUE (session_id, ip, user_agent),
+ FOREIGN KEY (session_id)
+ REFERENCES ui_auth_sessions (session_id)
+);
+CREATE UNIQUE INDEX federation_stream_position_instance ON federation_stream_position(type, instance_name);
+CREATE TABLE dehydrated_devices(
+ user_id TEXT NOT NULL PRIMARY KEY,
+ device_id TEXT NOT NULL,
+ device_data TEXT NOT NULL -- JSON-encoded client-defined data
+);
+CREATE TABLE e2e_fallback_keys_json (
+ user_id TEXT NOT NULL, -- The user this fallback key is for.
+ device_id TEXT NOT NULL, -- The device this fallback key is for.
+ algorithm TEXT NOT NULL, -- Which algorithm this fallback key is for.
+ key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads.
+ key_json TEXT NOT NULL, -- The key as a JSON blob.
+ used BOOLEAN NOT NULL DEFAULT FALSE, -- Whether the key has been used or not.
+ CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm)
+);
+CREATE TABLE destination_rooms (
+ -- the destination in question.
+ destination TEXT NOT NULL REFERENCES destinations (destination),
+ -- the ID of the room in question
+ room_id TEXT NOT NULL REFERENCES rooms (room_id),
+ -- the stream_ordering of the event
+ stream_ordering BIGINT NOT NULL,
+ PRIMARY KEY (destination, room_id)
+ -- We don't declare a foreign key on stream_ordering here because that'd mean
+ -- we'd need to either maintain an index (expensive) or do a table scan of
+ -- destination_rooms whenever we delete an event (also potentially expensive).
+ -- In addition to that, a foreign key on stream_ordering would be redundant
+ -- as this row doesn't need to refer to a specific event; if the event gets
+ -- deleted then it doesn't affect the validity of the stream_ordering here.
+);
+CREATE INDEX destination_rooms_room_id
+ ON destination_rooms (room_id);
+CREATE TABLE stream_positions (
+ stream_name TEXT NOT NULL,
+ instance_name TEXT NOT NULL,
+ stream_id BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX stream_positions_idx ON stream_positions(stream_name, instance_name);
+CREATE TABLE IF NOT EXISTS "access_tokens" (
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT,
+ token TEXT NOT NULL,
+ valid_until_ms BIGINT,
+ puppets_user_id TEXT,
+ last_validated BIGINT, refresh_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE, used BOOLEAN,
+ UNIQUE(token)
+);
+CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id);
+CREATE TABLE IF NOT EXISTS "event_txn_id" (
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ token_id BIGINT NOT NULL,
+ txn_id TEXT NOT NULL,
+ inserted_ts BIGINT NOT NULL,
+ FOREIGN KEY (event_id)
+ REFERENCES events (event_id) ON DELETE CASCADE,
+ FOREIGN KEY (token_id)
+ REFERENCES access_tokens (id) ON DELETE CASCADE
+);
+CREATE UNIQUE INDEX event_txn_id_event_id ON event_txn_id(event_id);
+CREATE UNIQUE INDEX event_txn_id_txn_id ON event_txn_id(room_id, user_id, token_id, txn_id);
+CREATE INDEX event_txn_id_ts ON event_txn_id(inserted_ts);
+CREATE TABLE ignored_users( ignorer_user_id TEXT NOT NULL, ignored_user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX ignored_users_uniqueness ON ignored_users (ignorer_user_id, ignored_user_id);
+CREATE INDEX ignored_users_ignored_user_id ON ignored_users (ignored_user_id);
+CREATE TABLE event_auth_chains (
+ event_id TEXT PRIMARY KEY,
+ chain_id BIGINT NOT NULL,
+ sequence_number BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX event_auth_chains_c_seq_index ON event_auth_chains (chain_id, sequence_number);
+CREATE TABLE event_auth_chain_links (
+ origin_chain_id BIGINT NOT NULL,
+ origin_sequence_number BIGINT NOT NULL,
+
+ target_chain_id BIGINT NOT NULL,
+ target_sequence_number BIGINT NOT NULL
+);
+CREATE INDEX event_auth_chain_links_idx ON event_auth_chain_links (origin_chain_id, target_chain_id);
+CREATE TABLE event_auth_chain_to_calculate (
+ event_id TEXT PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL
+);
+CREATE INDEX event_auth_chain_to_calculate_rm_id ON event_auth_chain_to_calculate(room_id);
+CREATE TABLE users_to_send_full_presence_to(
+ -- The user ID to send full presence to.
+ user_id TEXT PRIMARY KEY,
+ -- A presence stream ID token - the current presence stream token when the row was last upserted.
+ -- If a user calls /sync and this token is part of the update they're to receive, we also include
+ -- full user presence in the response.
+ -- This allows multiple devices for a user to receive full presence whenever they next call /sync.
+ presence_stream_id BIGINT,
+ FOREIGN KEY (user_id)
+ REFERENCES users (name)
+);
+CREATE TABLE refresh_tokens (
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ token TEXT NOT NULL,
+ -- When consumed, a new refresh token is generated, which is tracked by
+ -- this foreign key
+ next_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE, expiry_ts BIGINT DEFAULT NULL, ultimate_session_expiry_ts BIGINT DEFAULT NULL,
+ UNIQUE(token)
+);
+CREATE TABLE worker_locks (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+ -- We write the instance name to ease manual debugging; we don't ever read
+ -- from it.
+ -- Note: instance names aren't guaranteed to be unique.
+ instance_name TEXT NOT NULL,
+ -- A random string generated each time an instance takes out a lock. Used by
+ -- the instance to tell whether the lock is still held by it (e.g. in the
+ -- case where the process stalls for a long time the lock may time out and
+ -- be taken out by another instance, at which point the original instance
+ -- can tell it no longer holds the lock as the tokens no longer match).
+ token TEXT NOT NULL,
+ last_renewed_ts BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX worker_locks_key ON worker_locks (lock_name, lock_key);
+CREATE TABLE federation_inbound_events_staging (
+ origin TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ received_ts BIGINT NOT NULL,
+ event_json TEXT NOT NULL,
+ internal_metadata TEXT NOT NULL
+);
+CREATE INDEX federation_inbound_events_staging_room ON federation_inbound_events_staging(room_id, received_ts);
+CREATE UNIQUE INDEX federation_inbound_events_staging_instance_event ON federation_inbound_events_staging(origin, event_id);
+CREATE TABLE insertion_event_edges(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ insertion_prev_event_id TEXT NOT NULL
+);
+CREATE INDEX insertion_event_edges_insertion_room_id ON insertion_event_edges(room_id);
+CREATE INDEX insertion_event_edges_insertion_prev_event_id ON insertion_event_edges(insertion_prev_event_id);
+CREATE TABLE insertion_event_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX insertion_event_extremities_event_id ON insertion_event_extremities(event_id);
+CREATE INDEX insertion_event_extremities_room_id ON insertion_event_extremities(room_id);
+CREATE TABLE registration_tokens(
+ token TEXT NOT NULL, -- The token that can be used for authentication.
+ uses_allowed INT, -- The total number of times this token can be used. NULL if no limit.
+ pending INT NOT NULL, -- The number of in progress registrations using this token.
+ completed INT NOT NULL, -- The number of times this token has been used to complete a registration.
+ expiry_time BIGINT, -- The latest time this token will be valid (epoch time in milliseconds). NULL if token doesn't expire.
+ UNIQUE (token)
+);
+CREATE TABLE sessions(
+ session_type TEXT NOT NULL, -- The unique key for this type of session.
+ session_id TEXT NOT NULL, -- The session ID passed to the client.
+ value TEXT NOT NULL, -- A JSON dictionary to persist.
+ expiry_time_ms BIGINT NOT NULL, -- The time this session will expire (epoch time in milliseconds).
+ UNIQUE (session_type, session_id)
+);
+CREATE TABLE insertion_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ next_batch_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX insertion_events_event_id ON insertion_events(event_id);
+CREATE INDEX insertion_events_next_batch_id ON insertion_events(next_batch_id);
+CREATE TABLE batch_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ batch_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX batch_events_event_id ON batch_events(event_id);
+CREATE INDEX batch_events_batch_id ON batch_events(batch_id);
+CREATE INDEX insertion_event_edges_event_id ON insertion_event_edges(event_id);
+CREATE TABLE device_auth_providers (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ auth_provider_id TEXT NOT NULL,
+ auth_provider_session_id TEXT NOT NULL
+);
+CREATE INDEX device_auth_providers_devices
+ ON device_auth_providers (user_id, device_id);
+CREATE INDEX device_auth_providers_sessions
+ ON device_auth_providers (auth_provider_id, auth_provider_session_id);
+CREATE INDEX refresh_tokens_next_token_id
+ ON refresh_tokens(next_token_id)
+ WHERE next_token_id IS NOT NULL;
+CREATE TABLE partial_state_rooms (
+ room_id TEXT PRIMARY KEY,
+ FOREIGN KEY(room_id) REFERENCES rooms(room_id)
+);
+CREATE TABLE partial_state_rooms_servers (
+ room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id),
+ server_name TEXT NOT NULL,
+ UNIQUE(room_id, server_name)
+);
+CREATE TABLE partial_state_events (
+ -- the room_id is denormalised for efficient indexing (the canonical source is `events`)
+ room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id),
+ event_id TEXT NOT NULL REFERENCES events(event_id),
+ UNIQUE(event_id)
+);
+CREATE INDEX partial_state_events_room_id_idx
+ ON partial_state_events (room_id);
+CREATE TRIGGER partial_state_events_bad_room_id
+ BEFORE INSERT ON partial_state_events
+ FOR EACH ROW
+ BEGIN
+ SELECT RAISE(ABORT, 'Incorrect room_id in partial_state_events')
+ WHERE EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.room_id != NEW.room_id
+ );
+ END;
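+-- The trigger above rejects inserts whose room_id disagrees with the room_id
+-- already recorded for the same event in the events table.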
+CREATE TABLE device_lists_changes_in_room (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+
+ -- This initially matches `device_lists_stream.stream_id`. Note that we
+ -- delete older values from `device_lists_stream`, so we can't use a foreign
+ -- constraint here.
+ --
+ -- The table will contain rows with the same `stream_id` but different
+ -- `room_id`, as for each device update we store a row per room the user is
+ -- joined to. Therefore `(stream_id, room_id)` gives a unique index.
+ stream_id BIGINT NOT NULL,
+
+ -- We have a background process which goes through this table and converts
+ -- entries into rows in `device_lists_outbound_pokes`. Once we have processed
+ -- a row, we mark it as such by setting `converted_to_destinations=TRUE`.
+ converted_to_destinations BOOLEAN NOT NULL,
+ opentracing_context TEXT
+);
+CREATE UNIQUE INDEX device_lists_changes_in_stream_id ON device_lists_changes_in_room(stream_id, room_id);
+CREATE INDEX device_lists_changes_in_stream_id_unconverted ON device_lists_changes_in_room(stream_id) WHERE NOT converted_to_destinations;
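+-- Illustrative query (a sketch, not part of the schema itself): the background
+-- converter can page through unconverted rows in stream order via the partial
+-- index above, e.g.
+--   SELECT user_id, device_id, room_id, stream_id
+--   FROM device_lists_changes_in_room
+--   WHERE NOT converted_to_destinations
+--   ORDER BY stream_id
+--   LIMIT 100;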
+CREATE TABLE IF NOT EXISTS "event_edges" (
+ event_id TEXT NOT NULL,
+ prev_event_id TEXT NOT NULL,
+ room_id TEXT NULL,
+ is_state BOOL NOT NULL DEFAULT 0,
+ FOREIGN KEY(event_id) REFERENCES events(event_id)
+);
+CREATE UNIQUE INDEX event_edges_event_id_prev_event_id_idx
+ ON event_edges (event_id, prev_event_id);
+CREATE INDEX ev_edges_prev_id ON event_edges (prev_event_id);
+CREATE TABLE event_push_summary_last_receipt_stream_id (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+CREATE TABLE IF NOT EXISTS "application_services_state" (
+ as_id TEXT PRIMARY KEY NOT NULL,
+ state VARCHAR(5),
+ read_receipt_stream_id BIGINT,
+ presence_stream_id BIGINT,
+ to_device_stream_id BIGINT,
+ device_list_stream_id BIGINT
+);
+CREATE TABLE IF NOT EXISTS "receipts_linearized" (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ thread_id TEXT,
+ event_stream_ordering BIGINT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+CREATE TABLE IF NOT EXISTS "receipts_graph" (
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_ids TEXT NOT NULL,
+ thread_id TEXT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
+CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
+CREATE INDEX redactions_have_censored_ts ON redactions (received_ts) WHERE NOT have_censored;
+CREATE INDEX room_memberships_user_room_forgotten ON room_memberships (user_id, room_id) WHERE forgotten = 1;
+CREATE INDEX users_have_local_media ON local_media_repository (user_id, created_ts) ;
+CREATE UNIQUE INDEX e2e_cross_signing_keys_stream_idx ON e2e_cross_signing_keys (stream_id) ;
+CREATE INDEX user_external_ids_user_id_idx ON user_external_ids (user_id) ;
+CREATE INDEX presence_stream_state_not_offline_idx ON presence_stream (state) WHERE state != 'offline';
+CREATE UNIQUE INDEX event_push_summary_unique_index ON event_push_summary (user_id, room_id) ;
+CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ;
+CREATE UNIQUE INDEX receipts_graph_unique_index ON receipts_graph (room_id, receipt_type, user_id) WHERE thread_id IS NULL;
+CREATE UNIQUE INDEX receipts_linearized_unique_index ON receipts_linearized (room_id, receipt_type, user_id) WHERE thread_id IS NULL;
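+-- The two partial unique indexes above cover unthreaded receipts: NULLs never
+-- conflict under a UNIQUE constraint, so the *_uniqueness_thread constraints
+-- alone would allow duplicate rows with a NULL thread_id.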
+CREATE INDEX event_push_actions_stream_highlight_index ON event_push_actions (highlight, stream_ordering) WHERE highlight=0;
+CREATE INDEX current_state_events_member_index ON current_state_events (state_key) WHERE type='m.room.member';
+CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering) WHERE highlight=1;
+CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts) WHERE url_cache IS NOT NULL;
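+-- Seed the stream-position bookkeeping tables so a fresh database starts from
+-- a known position.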
+INSERT INTO appservice_stream_position VALUES('X',0);
+INSERT INTO federation_stream_position VALUES('federation',-1,'master');
+INSERT INTO federation_stream_position VALUES('events',-1,'master');
+INSERT INTO event_push_summary_stream_ordering VALUES('X',0);
+INSERT INTO user_directory_stream_pos VALUES('X',1);
+INSERT INTO stats_incremental_position VALUES('X',1);
+INSERT INTO event_push_summary_last_receipt_stream_id VALUES('X',0);
diff --git a/synapse/storage/schema/state/delta/30/state_stream.sql b/synapse/storage/schema/state/delta/30/state_stream.sql
deleted file mode 100644
index e85699e82e..0000000000
--- a/synapse/storage/schema/state/delta/30/state_stream.sql
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/* We used to create a table called current_state_resets, but this is no
- * longer used and is removed in delta 54.
- */
-
-/* The outlier events that have aquired a state group typically through
- * backfill. This is tracked separately to the events table, as assigning a
- * state group change the position of the existing event in the stream
- * ordering.
- * However since a stream_ordering is assigned in persist_event for the
- * (event, state) pair, we can use that stream_ordering to identify when
- * the new state was assigned for the event.
- */
-CREATE TABLE IF NOT EXISTS ex_outlier_stream(
- event_stream_ordering BIGINT PRIMARY KEY NOT NULL,
- event_id TEXT NOT NULL,
- state_group BIGINT NOT NULL
-);
diff --git a/synapse/storage/schema/state/full_schemas/72/full.sql.postgres b/synapse/storage/schema/state/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..263ade761e
--- /dev/null
+++ b/synapse/storage/schema/state/full_schemas/72/full.sql.postgres
@@ -0,0 +1,30 @@
+CREATE TABLE state_group_edges (
+ state_group bigint NOT NULL,
+ prev_state_group bigint NOT NULL
+);
+CREATE SEQUENCE state_group_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE state_groups (
+ id bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE state_groups_state (
+ state_group bigint NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ event_id text NOT NULL
+);
+ALTER TABLE ONLY state_groups_state ALTER COLUMN state_group SET (n_distinct=-0.02);
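+-- (A negative n_distinct is a Postgres planner hint: the column is estimated
+-- to have that fraction of distinct values, here roughly 2% of the row count.)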
+ALTER TABLE ONLY state_groups
+ ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group);
+CREATE UNIQUE INDEX state_group_edges_unique_idx ON state_group_edges USING btree (state_group, prev_state_group);
+CREATE INDEX state_groups_room_id_idx ON state_groups USING btree (room_id);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key);
+SELECT pg_catalog.setval('state_group_id_seq', 1, false);
diff --git a/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..dda060b638
--- /dev/null
+++ b/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,20 @@
+CREATE TABLE state_groups (
+ id BIGINT PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+CREATE TABLE state_groups_state (
+ state_group BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+CREATE TABLE state_group_edges (
+ state_group BIGINT NOT NULL,
+ prev_state_group BIGINT NOT NULL
+);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges (prev_state_group);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state (state_group, type, state_key);
+CREATE INDEX state_groups_room_id_idx ON state_groups (room_id) ;
+CREATE UNIQUE INDEX state_group_edges_unique_idx ON state_group_edges (state_group, prev_state_group) ;
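+-- Each state group typically stores only a delta in state_groups_state; the
+-- full state is resolved by following prev_state_group links in
+-- state_group_edges and overlaying the deltas.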
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index af3bab2c15..0004d955b4 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -539,15 +539,6 @@ class StateFilter:
is_mine_id: a callable which confirms if a given state_key matches a mxid
of a local user
"""
-
- # TODO(faster_joins): it's not entirely clear that this is safe. In particular,
- # there may be circumstances in which we return a piece of state that, once we
- # resync the state, we discover is invalid. For example: if it turns out that
- # the sender of a piece of state wasn't actually in the room, then clearly that
- # state shouldn't have been returned.
- # We should at least add some tests around this to see what happens.
- # https://github.com/matrix-org/synapse/issues/13006
-
# if we haven't requested membership events, then it depends on the value of
# 'include_others'
if EventTypes.Member not in self.types:
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 3c13859faa..0d7108f01b 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -186,11 +186,13 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
column: str,
extra_tables: Iterable[Tuple[str, str]] = (),
step: int = 1,
+ is_writer: bool = True,
) -> None:
assert step != 0
self._lock = threading.Lock()
self._step: int = step
self._current: int = _load_current_id(db_conn, table, column, step)
+ self._is_writer = is_writer
for table, column in extra_tables:
self._current = (max if step > 0 else min)(
self._current, _load_current_id(db_conn, table, column, step)
@@ -204,9 +206,11 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
self._unfinished_ids: OrderedDict[int, int] = OrderedDict()
def advance(self, instance_name: str, new_id: int) -> None:
- # `StreamIdGenerator` should only be used when there is a single writer,
- # so replication should never happen.
- raise Exception("Replication is not supported by StreamIdGenerator")
+ # Advance should never be called on a writer instance, only over replication
+ if self._is_writer:
+ raise Exception("Replication is not supported by writer StreamIdGenerator")
+
+ self._current = (max if self._step > 0 else min)(self._current, new_id)
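+ # e.g. with the default positive step, a non-writer at position 7 ignores a
+ # replicated id of 5 but moves up when it later sees 9.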
def get_next(self) -> AsyncContextManager[int]:
with self._lock:
@@ -249,6 +253,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
return _AsyncCtxManagerWrapper(manager())
def get_current_token(self) -> int:
+ if not self._is_writer:
+ return self._current
+
with self._lock:
if self._unfinished_ids:
return next(iter(self._unfinished_ids)) - self._step
@@ -460,8 +467,17 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
# Cast safety: this corresponds to the types returned by the query above.
rows.extend(cast(Iterable[Tuple[str, int]], cur))
- # Sort so that we handle rows in order for each instance.
- rows.sort()
+ # Sort by stream_id (ascending, lowest -> highest) so that we handle
+ # rows in order for each instance because we don't want to overwrite
+ # the current_position of an instance to a lower stream ID than
+ # we're actually at.
+ def sort_by_stream_id_key_func(row: Tuple[str, int]) -> int:
+ (instance, stream_id) = row
+ # If `stream_id` is ever `None`, we will see a `TypeError: '<'
+ # not supported between instances of 'NoneType' and 'X'` error.
+ return stream_id
+
+ rows.sort(key=sort_by_stream_id_key_func)
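+ # For example, rows of [("b", 7), ("a", 3), ("a", 5)] become
+ # [("a", 3), ("a", 5), ("b", 7)], so each instance's position only ever
+ # moves forwards as the rows are applied below.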
with self._lock:
for (
diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py
index 466e5137f2..8d8894d1d5 100644
--- a/synapse/storage/util/partial_state_events_tracker.py
+++ b/synapse/storage/util/partial_state_events_tracker.py
@@ -20,9 +20,11 @@ from twisted.internet import defer
from twisted.internet.defer import Deferred
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
+from synapse.logging.opentracing import trace_with_opname
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.util import unwrapFirstError
+from synapse.util.cancellation import cancellable
logger = logging.getLogger(__name__)
@@ -58,6 +60,8 @@ class PartialStateEventsTracker:
for o in observers:
o.callback(None)
+ @trace_with_opname("PartialStateEventsTracker.await_full_state")
+ @cancellable
async def await_full_state(self, event_ids: Collection[str]) -> None:
"""Wait for all the given events to have full state.
@@ -151,6 +155,8 @@ class PartialCurrentStateTracker:
for o in observers:
o.callback(None)
+ @trace_with_opname("PartialCurrentStateTracker.await_full_state")
+ @cancellable
async def await_full_state(self, room_id: str) -> None:
# We add the deferred immediately so that the DB call to check for
# partial state doesn't race when we unpartial the room.
diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py
index 806b671305..2dcd43d0a2 100644
--- a/synapse/streams/__init__.py
+++ b/synapse/streams/__init__.py
@@ -27,7 +27,7 @@ class EventSource(Generic[K, R]):
self,
user: UserID,
from_key: K,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index b52723e2b8..6df2de919c 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -35,17 +35,19 @@ class PaginationConfig:
from_token: Optional[StreamToken]
to_token: Optional[StreamToken]
direction: str
- limit: Optional[int]
+ limit: int
@classmethod
async def from_request(
cls,
store: "DataStore",
request: SynapseRequest,
- raise_invalid_params: bool = True,
- default_limit: Optional[int] = None,
+ default_limit: int,
+ default_dir: str = "f",
) -> "PaginationConfig":
- direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])
+ direction = parse_string(
+ request, "dir", default=default_dir, allowed_values=["f", "b"]
+ )
from_tok_str = parse_string(request, "from")
to_tok_str = parse_string(request, "to")
@@ -67,12 +69,10 @@ class PaginationConfig:
raise SynapseError(400, "'to' parameter is invalid")
limit = parse_integer(request, "limit", default=default_limit)
+ if limit < 0:
+ raise SynapseError(400, "Limit must be 0 or above")
- if limit:
- if limit < 0:
- raise SynapseError(400, "Limit must be 0 or above")
-
- limit = min(int(limit), MAX_LIMIT)
+ limit = min(limit, MAX_LIMIT)
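+ # e.g. "?limit=100000" is clamped to MAX_LIMIT, and a missing "limit"
+ # parameter falls back to the caller-supplied default_limit.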
try:
return PaginationConfig(from_tok, to_tok, direction, limit)
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index 54e0b1a23b..619eb7f601 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -21,6 +21,7 @@ from synapse.handlers.presence import PresenceEventSource
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.room import RoomEventSource
from synapse.handlers.typing import TypingNotificationEventSource
+from synapse.logging.opentracing import trace
from synapse.streams import EventSource
from synapse.types import StreamToken
@@ -44,9 +45,12 @@ class _EventSourcesInner:
class EventSources:
def __init__(self, hs: "HomeServer"):
self.sources = _EventSourcesInner(
- # mypy thinks attribute.type is `Optional`, but we know it's never `None` here since
- # all the attributes of `_EventSourcesInner` are annotated.
- *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner)) # type: ignore[misc]
+ # mypy previously warned that attribute.type is `Optional`, but we know it's
+ # never `None` here since all the attributes of `_EventSourcesInner` are
+ # annotated.
+ # As of the stubs in attrs 22.1.0, `attr.fields()` now returns Any,
+ # so the call to `attribute.type` is not checked.
+ *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner))
)
self.store = hs.get_datastores().main
@@ -69,6 +73,20 @@ class EventSources:
)
return token
+ @trace
+ async def get_start_token_for_pagination(self, room_id: str) -> StreamToken:
+ """Get the start token for a given room to be used to paginate
+ events.
+
+ The returned token does not have the current values for fields other
+ than `room`, since they are not used during pagination.
+
+ Returns:
+ The start token for pagination.
+ """
+ return StreamToken.START
+
+ @trace
async def get_current_token_for_pagination(self, room_id: str) -> StreamToken:
"""Get the current token for a given room to be used to paginate
events.
diff --git a/synapse/types.py b/synapse/types.py
index 668d48d646..f2d436ddc3 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -52,6 +52,7 @@ from twisted.internet.interfaces import (
)
from synapse.api.errors import Codes, SynapseError
+from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name
if TYPE_CHECKING:
@@ -142,8 +143,8 @@ class Requester:
Requester.
Args:
- store (DataStore): Used to convert AS ID to AS object
- input (dict): A dict produced by `serialize`
+ store: Used to convert AS ID to AS object
+ input: A dict produced by `serialize`
Returns:
Requester
@@ -699,7 +700,11 @@ class StreamToken:
START: ClassVar["StreamToken"]
@classmethod
+ @cancellable
async def from_string(cls, store: "DataStore", string: str) -> "StreamToken":
+ """
+ Creates a StreamToken from its textual representation.
+ """
try:
keys = string.split(cls._SEPARATOR)
while len(keys) < len(attr.fields(cls)):
@@ -830,6 +835,7 @@ class ReadReceipt:
receipt_type: str
user_id: str
event_ids: List[str]
+ thread_id: Optional[str]
data: JsonDict
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index a90f08dd4c..7be9d5f113 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -15,7 +15,7 @@
import json
import logging
import typing
-from typing import Any, Callable, Dict, Generator, Optional
+from typing import Any, Callable, Dict, Generator, Optional, Sequence
import attr
from frozendict import frozendict
@@ -193,3 +193,15 @@ def log_failure(
# Version string with git info. Computed here once so that we don't invoke git multiple
# times.
SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__)
+
+
+class ExceptionBundle(Exception):
+ # A poor stand-in for something like Python 3.11's ExceptionGroup.
+ # (A backport called `exceptiongroup` exists but seems overkill: we just want a
+ # container type here.)
+ def __init__(self, message: str, exceptions: Sequence[Exception]):
+ parts = [message]
+ for e in exceptions:
+ parts.append(str(e))
+ super().__init__("\n - ".join(parts))
+ self.exceptions = exceptions
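+
+
+# Example usage (a sketch; `cleanup_steps` is a hypothetical iterable of callables):
+#
+#   errors = []
+#   for step in cleanup_steps:
+#       try:
+#           step()
+#       except Exception as e:
+#           errors.append(e)
+#   if errors:
+#       raise ExceptionBundle("some cleanup steps failed", errors)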
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 7f1d41eb3c..d24c4f68c4 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -217,7 +217,8 @@ async def concurrently_execute(
limit: Maximum number of conccurent executions.
Returns:
- Deferred: Resolved when all function invocations have finished.
+ None, when all function invocations have finished. The return values
+ from those functions are discarded.
"""
it = iter(args)
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 42f6abb5e1..9387632d0d 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -20,9 +20,11 @@ from sys import intern
from typing import Any, Callable, Dict, List, Optional, Sized, TypeVar
import attr
+from prometheus_client import REGISTRY
from prometheus_client.core import Gauge
from synapse.config.cache import add_resizable_cache
+from synapse.util.metrics import DynamicCollectorRegistry
logger = logging.getLogger(__name__)
@@ -30,27 +32,62 @@ logger = logging.getLogger(__name__)
# Whether to track estimated memory usage of the LruCaches.
TRACK_MEMORY_USAGE = False
+# We track cache metrics in a special registry that lets us update the metrics
+# just before they are returned from the scrape endpoint.
+CACHE_METRIC_REGISTRY = DynamicCollectorRegistry()
caches_by_name: Dict[str, Sized] = {}
-collectors_by_name: Dict[str, "CacheMetric"] = {}
-cache_size = Gauge("synapse_util_caches_cache:size", "", ["name"])
-cache_hits = Gauge("synapse_util_caches_cache:hits", "", ["name"])
-cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name", "reason"])
-cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"])
-cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"])
+cache_size = Gauge(
+ "synapse_util_caches_cache_size", "", ["name"], registry=CACHE_METRIC_REGISTRY
+)
+cache_hits = Gauge(
+ "synapse_util_caches_cache_hits", "", ["name"], registry=CACHE_METRIC_REGISTRY
+)
+cache_evicted = Gauge(
+ "synapse_util_caches_cache_evicted_size",
+ "",
+ ["name", "reason"],
+ registry=CACHE_METRIC_REGISTRY,
+)
+cache_total = Gauge(
+ "synapse_util_caches_cache", "", ["name"], registry=CACHE_METRIC_REGISTRY
+)
+cache_max_size = Gauge(
+ "synapse_util_caches_cache_max_size", "", ["name"], registry=CACHE_METRIC_REGISTRY
+)
cache_memory_usage = Gauge(
"synapse_util_caches_cache_size_bytes",
"Estimated memory usage of the caches",
["name"],
+ registry=CACHE_METRIC_REGISTRY,
)
-response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"])
-response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"])
+response_cache_size = Gauge(
+ "synapse_util_caches_response_cache_size",
+ "",
+ ["name"],
+ registry=CACHE_METRIC_REGISTRY,
+)
+response_cache_hits = Gauge(
+ "synapse_util_caches_response_cache_hits",
+ "",
+ ["name"],
+ registry=CACHE_METRIC_REGISTRY,
+)
response_cache_evicted = Gauge(
- "synapse_util_caches_response_cache:evicted_size", "", ["name", "reason"]
+ "synapse_util_caches_response_cache_evicted_size",
+ "",
+ ["name", "reason"],
+ registry=CACHE_METRIC_REGISTRY,
)
-response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"])
+response_cache_total = Gauge(
+ "synapse_util_caches_response_cache", "", ["name"], registry=CACHE_METRIC_REGISTRY
+)
+
+
+# Register our custom cache metrics registry with the global registry
+REGISTRY.register(CACHE_METRIC_REGISTRY)
class EvictionReason(Enum):
@@ -160,7 +197,7 @@ def register_cache(
resize_callback: A function which can be called to resize the cache.
Returns:
- CacheMetric: an object which provides inc_{hits,misses,evictions} methods
+ an object which provides inc_{hits,misses,evictions} methods
"""
if resizable:
if not resize_callback:
@@ -170,7 +207,7 @@ def register_cache(
metric = CacheMetric(cache, cache_type, cache_name, collect_callback)
metric_name = "cache_%s_%s" % (cache_type, cache_name)
caches_by_name[cache_name] = cache
- collectors_by_name[metric_name] = metric
+ CACHE_METRIC_REGISTRY.register_hook(metric_name, metric.collect)
return metric
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 1d6ec22191..bf7bd351e0 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -14,15 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import abc
import enum
import threading
from typing import (
Callable,
+ Collection,
+ Dict,
Generic,
- Iterable,
MutableMapping,
Optional,
+ Set,
Sized,
+ Tuple,
TypeVar,
Union,
cast,
@@ -31,7 +35,6 @@ from typing import (
from prometheus_client import Gauge
from twisted.internet import defer
-from twisted.python import failure
from twisted.python.failure import Failure
from synapse.util.async_helpers import ObservableDeferred
@@ -94,7 +97,7 @@ class DeferredCache(Generic[KT, VT]):
# _pending_deferred_cache maps from the key value to a `CacheEntry` object.
self._pending_deferred_cache: Union[
- TreeCache, "MutableMapping[KT, CacheEntry]"
+ TreeCache, "MutableMapping[KT, CacheEntry[KT, VT]]"
] = cache_type()
def metrics_cb() -> None:
@@ -150,7 +153,7 @@ class DeferredCache(Generic[KT, VT]):
Args:
key:
callback: Gets called when the entry in the cache is invalidated
- update_metrics (bool): whether to update the cache hit rate metrics
+ update_metrics: whether to update the cache hit rate metrics
Returns:
A Deferred which completes with the result. Note that this may later fail
@@ -159,15 +162,16 @@ class DeferredCache(Generic[KT, VT]):
Raises:
KeyError if the key is not found in the cache
"""
- callbacks = [callback] if callback else []
val = self._pending_deferred_cache.get(key, _Sentinel.sentinel)
if val is not _Sentinel.sentinel:
- val.callbacks.update(callbacks)
+ val.add_invalidation_callback(key, callback)
if update_metrics:
m = self.cache.metrics
assert m # we always have a name, so should always have metrics
m.inc_hits()
- return val.deferred.observe()
+ return val.deferred(key)
+
+ callbacks = (callback,) if callback else ()
val2 = self.cache.get(
key, _Sentinel.sentinel, callbacks=callbacks, update_metrics=update_metrics
@@ -177,6 +181,73 @@ class DeferredCache(Generic[KT, VT]):
else:
return defer.succeed(val2)
+ def get_bulk(
+ self,
+ keys: Collection[KT],
+ callback: Optional[Callable[[], None]] = None,
+ ) -> Tuple[Dict[KT, VT], Optional["defer.Deferred[Dict[KT, VT]]"], Collection[KT]]:
+ """Bulk lookup of items in the cache.
+
+ Returns:
+ A 3-tuple of:
+ 1. a dict of key/value of items already cached;
+ 2. a deferred that resolves to a dict of key/value of items
+ we're already fetching; and
+ 3. a collection of keys that don't appear in the previous two.
+ """
+
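+ # Intended usage (a sketch; the exact caller lives in the store layer):
+ #
+ #   cached, in_flight, missing = cache.get_bulk(keys)
+ #   results = dict(cached)
+ #   if in_flight is not None:
+ #       results.update(await make_deferred_yieldable(in_flight))
+ #   # then fetch `missing` from the DB via start_bulk_input()/complete_bulk()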
+ # The cached results
+ cached = {}
+
+ # List of pending deferreds
+ pending = []
+
+ # Dict that gets filled out when the pending deferreds complete
+ pending_results = {}
+
+ # List of keys that aren't in either cache
+ missing = []
+
+ callbacks = (callback,) if callback else ()
+
+ for key in keys:
+ # Check if it's in the main cache.
+ immediate_value = self.cache.get(
+ key,
+ _Sentinel.sentinel,
+ callbacks=callbacks,
+ )
+ if immediate_value is not _Sentinel.sentinel:
+ cached[key] = immediate_value
+ continue
+
+ # Check if it's in the pending cache
+ pending_value = self._pending_deferred_cache.get(key, _Sentinel.sentinel)
+ if pending_value is not _Sentinel.sentinel:
+ pending_value.add_invalidation_callback(key, callback)
+
+ def completed_cb(value: VT, key: KT) -> VT:
+ pending_results[key] = value
+ return value
+
+ # Add a callback to fill out `pending_results` when that completes
+ d = pending_value.deferred(key).addCallback(completed_cb, key)
+ pending.append(d)
+ continue
+
+ # Not in either cache
+ missing.append(key)
+
+ # If we've got pending deferreds, squash them into a single one that
+ # returns `pending_results`.
+ pending_deferred = None
+ if pending:
+ pending_deferred = defer.gatherResults(
+ pending, consumeErrors=True
+ ).addCallback(lambda _: pending_results)
+
+ return (cached, pending_deferred, missing)
+
def get_immediate(
self, key: KT, default: T, update_metrics: bool = True
) -> Union[VT, T]:
@@ -218,84 +289,89 @@ class DeferredCache(Generic[KT, VT]):
value: a deferred which will complete with a result to add to the cache
callback: An optional callback to be called when the entry is invalidated
"""
- if not isinstance(value, defer.Deferred):
- raise TypeError("not a Deferred")
-
- callbacks = [callback] if callback else []
self.check_thread()
- existing_entry = self._pending_deferred_cache.pop(key, None)
- if existing_entry:
- existing_entry.invalidate()
+ self._pending_deferred_cache.pop(key, None)
# XXX: why don't we invalidate the entry in `self.cache` yet?
- # we can save a whole load of effort if the deferred is ready.
- if value.called:
- result = value.result
- if not isinstance(result, failure.Failure):
- self.cache.set(key, cast(VT, result), callbacks)
- return value
-
# otherwise, we'll add an entry to the _pending_deferred_cache for now,
# and add callbacks to add it to the cache properly later.
+ entry = CacheEntrySingle[KT, VT](value)
+ entry.add_invalidation_callback(key, callback)
+ self._pending_deferred_cache[key] = entry
+ deferred = entry.deferred(key).addCallbacks(
+ self._completed_callback,
+ self._error_callback,
+ callbackArgs=(entry, key),
+ errbackArgs=(entry, key),
+ )
- observable = ObservableDeferred(value, consumeErrors=True)
- observer = observable.observe()
- entry = CacheEntry(deferred=observable, callbacks=callbacks)
+ # we return a new Deferred which will be called before any subsequent observers.
+ return deferred
- self._pending_deferred_cache[key] = entry
+ def start_bulk_input(
+ self,
+ keys: Collection[KT],
+ callback: Optional[Callable[[], None]] = None,
+ ) -> "CacheMultipleEntries[KT, VT]":
+ """Bulk set API for use when fetching multiple keys at once from the DB.
- def compare_and_pop() -> bool:
- """Check if our entry is still the one in _pending_deferred_cache, and
- if so, pop it.
-
- Returns true if the entries matched.
- """
- existing_entry = self._pending_deferred_cache.pop(key, None)
- if existing_entry is entry:
- return True
-
- # oops, the _pending_deferred_cache has been updated since
- # we started our query, so we are out of date.
- #
- # Better put back whatever we took out. (We do it this way
- # round, rather than peeking into the _pending_deferred_cache
- # and then removing on a match, to make the common case faster)
- if existing_entry is not None:
- self._pending_deferred_cache[key] = existing_entry
-
- return False
-
- def cb(result: VT) -> None:
- if compare_and_pop():
- self.cache.set(key, result, entry.callbacks)
- else:
- # we're not going to put this entry into the cache, so need
- # to make sure that the invalidation callbacks are called.
- # That was probably done when _pending_deferred_cache was
- # updated, but it's possible that `set` was called without
- # `invalidate` being previously called, in which case it may
- # not have been. Either way, let's double-check now.
- entry.invalidate()
-
- def eb(_fail: Failure) -> None:
- compare_and_pop()
- entry.invalidate()
-
- # once the deferred completes, we can move the entry from the
- # _pending_deferred_cache to the real cache.
- #
- observer.addCallbacks(cb, eb)
+ Called *before* starting the fetch from the DB, and the caller *must*
+ call either `complete_bulk(..)` or `error_bulk(..)` on the return value.
+ """
- # we return a new Deferred which will be called before any subsequent observers.
- return observable.observe()
+ entry = CacheMultipleEntries[KT, VT]()
+ entry.add_global_invalidation_callback(callback)
+
+ for key in keys:
+ self._pending_deferred_cache[key] = entry
+
+ return entry
+
+ def _completed_callback(
+ self, value: VT, entry: "CacheEntry[KT, VT]", key: KT
+ ) -> VT:
+ """Called when a deferred is completed."""
+ # We check if the current entry matches the entry associated with the
+ # deferred. If they don't match then it got invalidated.
+ current_entry = self._pending_deferred_cache.pop(key, None)
+ if current_entry is not entry:
+ if current_entry:
+ self._pending_deferred_cache[key] = current_entry
+ return value
+
+ self.cache.set(key, value, entry.get_invalidation_callbacks(key))
+
+ return value
+
+ def _error_callback(
+ self,
+ failure: Failure,
+ entry: "CacheEntry[KT, VT]",
+ key: KT,
+ ) -> Failure:
+ """Called when a deferred errors."""
+
+ # We check if the current entry matches the entry associated with the
+ # deferred. If they don't match then it got invalidated.
+ current_entry = self._pending_deferred_cache.pop(key, None)
+ if current_entry is not entry:
+ if current_entry:
+ self._pending_deferred_cache[key] = current_entry
+ return failure
+
+ for cb in entry.get_invalidation_callbacks(key):
+ cb()
+
+ return failure
def prefill(
self, key: KT, value: VT, callback: Optional[Callable[[], None]] = None
) -> None:
- callbacks = [callback] if callback else []
+ callbacks = (callback,) if callback else ()
self.cache.set(key, value, callbacks=callbacks)
+ self._pending_deferred_cache.pop(key, None)
def invalidate(self, key: KT) -> None:
"""Delete a key, or tree of entries
@@ -311,41 +387,129 @@ class DeferredCache(Generic[KT, VT]):
self.cache.del_multi(key)
# if we have a pending lookup for this key, remove it from the
- # _pending_deferred_cache, which will (a) stop it being returned
- # for future queries and (b) stop it being persisted as a proper entry
+ # _pending_deferred_cache, which will (a) stop it being returned for
+ # future queries and (b) stop it being persisted as a proper entry
# in self.cache.
entry = self._pending_deferred_cache.pop(key, None)
-
- # run the invalidation callbacks now, rather than waiting for the
- # deferred to resolve.
if entry:
# _pending_deferred_cache.pop should either return a CacheEntry, or, in the
# case of a TreeCache, a dict of keys to cache entries. Either way calling
# iterate_tree_cache_entry on it will do the right thing.
- for entry in iterate_tree_cache_entry(entry):
- entry.invalidate()
+ for iter_entry in iterate_tree_cache_entry(entry):
+ for cb in iter_entry.get_invalidation_callbacks(key):
+ cb()
def invalidate_all(self) -> None:
self.check_thread()
self.cache.clear()
- for entry in self._pending_deferred_cache.values():
- entry.invalidate()
+ for key, entry in self._pending_deferred_cache.items():
+ for cb in entry.get_invalidation_callbacks(key):
+ cb()
+
self._pending_deferred_cache.clear()
-class CacheEntry:
- __slots__ = ["deferred", "callbacks", "invalidated"]
+class CacheEntry(Generic[KT, VT], metaclass=abc.ABCMeta):
+ """Abstract class for entries in `DeferredCache[KT, VT]`"""
- def __init__(
- self, deferred: ObservableDeferred, callbacks: Iterable[Callable[[], None]]
- ):
- self.deferred = deferred
- self.callbacks = set(callbacks)
- self.invalidated = False
-
- def invalidate(self) -> None:
- if not self.invalidated:
- self.invalidated = True
- for callback in self.callbacks:
- callback()
- self.callbacks.clear()
+ @abc.abstractmethod
+ def deferred(self, key: KT) -> "defer.Deferred[VT]":
+ """Get a deferred that a caller can wait on to get the value at the
+ given key"""
+ ...
+
+ @abc.abstractmethod
+ def add_invalidation_callback(
+ self, key: KT, callback: Optional[Callable[[], None]]
+ ) -> None:
+ """Add an invalidation callback"""
+ ...
+
+ @abc.abstractmethod
+ def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]:
+ """Get all invalidation callbacks"""
+ ...
+
+
+class CacheEntrySingle(CacheEntry[KT, VT]):
+ """An implementation of `CacheEntry` wrapping a deferred that results in a
+ single cache entry.
+ """
+
+ __slots__ = ["_deferred", "_callbacks"]
+
+ def __init__(self, deferred: "defer.Deferred[VT]") -> None:
+ self._deferred = ObservableDeferred(deferred, consumeErrors=True)
+ self._callbacks: Set[Callable[[], None]] = set()
+
+ def deferred(self, key: KT) -> "defer.Deferred[VT]":
+ return self._deferred.observe()
+
+ def add_invalidation_callback(
+ self, key: KT, callback: Optional[Callable[[], None]]
+ ) -> None:
+ if callback is None:
+ return
+
+ self._callbacks.add(callback)
+
+ def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]:
+ return self._callbacks
+
+
+class CacheMultipleEntries(CacheEntry[KT, VT]):
+ """Cache entry that is used for bulk lookups and insertions."""
+
+ __slots__ = ["_deferred", "_callbacks", "_global_callbacks"]
+
+ def __init__(self) -> None:
+ self._deferred: Optional[ObservableDeferred[Dict[KT, VT]]] = None
+ self._callbacks: Dict[KT, Set[Callable[[], None]]] = {}
+ self._global_callbacks: Set[Callable[[], None]] = set()
+
+ def deferred(self, key: KT) -> "defer.Deferred[VT]":
+ if not self._deferred:
+ self._deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
+ return self._deferred.observe().addCallback(lambda res: res.get(key))
+
+ def add_invalidation_callback(
+ self, key: KT, callback: Optional[Callable[[], None]]
+ ) -> None:
+ if callback is None:
+ return
+
+ self._callbacks.setdefault(key, set()).add(callback)
+
+ def get_invalidation_callbacks(self, key: KT) -> Collection[Callable[[], None]]:
+ return self._callbacks.get(key, set()) | self._global_callbacks
+
+ def add_global_invalidation_callback(
+ self, callback: Optional[Callable[[], None]]
+ ) -> None:
+ """Add a callback for when any keys get invalidated."""
+ if callback is None:
+ return
+
+ self._global_callbacks.add(callback)
+
+ def complete_bulk(
+ self,
+ cache: DeferredCache[KT, VT],
+ result: Dict[KT, VT],
+ ) -> None:
+ """Called when there is a result"""
+ for key, value in result.items():
+ cache._completed_callback(value, self, key)
+
+ if self._deferred:
+ self._deferred.callback(result)
+
+ def error_bulk(
+ self, cache: DeferredCache[KT, VT], keys: Collection[KT], failure: Failure
+ ) -> None:
+ """Called when bulk lookup failed."""
+ for key in keys:
+ cache._error_callback(failure, self, key)
+
+ if self._deferred:
+ self._deferred.errback(failure)
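
As a rough illustration of how the new bulk API above fits together (the cache name, key/value types and the `_fetch_from_db` helper below are assumptions for this sketch, not part of the change):

    from typing import Dict, Iterable

    from twisted.python.failure import Failure

    from synapse.util.caches.deferred_cache import DeferredCache

    cache: DeferredCache[str, int] = DeferredCache("example_cache")

    async def _fetch_from_db(keys: Iterable[str]) -> Dict[str, int]:
        # Stand-in for a real database query.
        return {key: 0 for key in keys}

    async def get_many(keys: Iterable[str]) -> Dict[str, int]:
        # Split the keys into already-cached, in-flight, and missing.
        cached, pending, missing = cache.get_bulk(list(keys))
        results = dict(cached)

        if missing:
            # Register the pending lookup before hitting the DB, then complete
            # or error the entry so other waiters are unblocked.
            entry = cache.start_bulk_input(missing)
            try:
                fetched = await _fetch_from_db(missing)
            except Exception:
                entry.error_bulk(cache, missing, Failure())
                raise
            entry.complete_bulk(cache, fetched)
            results.update(fetched)

        if pending:
            # Wait for lookups some other caller already started.
            results.update(await pending)

        return results
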
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 867f315b2a..72227359b9 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import enum
import functools
import inspect
import logging
@@ -25,6 +24,7 @@ from typing import (
Generic,
Hashable,
Iterable,
+ List,
Mapping,
Optional,
Sequence,
@@ -52,7 +52,7 @@ CacheKey = Union[Tuple, Any]
F = TypeVar("F", bound=Callable[..., Any])
-class _CachedFunction(Generic[F]):
+class CachedFunction(Generic[F]):
invalidate: Any = None
invalidate_all: Any = None
prefill: Any = None
@@ -73,8 +73,10 @@ class _CacheDescriptorBase:
num_args: Optional[int],
uncached_args: Optional[Collection[str]] = None,
cache_context: bool = False,
+ name: Optional[str] = None,
):
self.orig = orig
+ self.name = name or orig.__name__
arg_spec = inspect.getfullargspec(orig)
all_args = arg_spec.args
@@ -143,109 +145,6 @@ class _CacheDescriptorBase:
)
-class _LruCachedFunction(Generic[F]):
- cache: LruCache[CacheKey, Any]
- __call__: F
-
-
-def lru_cache(
- *, max_entries: int = 1000, cache_context: bool = False
-) -> Callable[[F], _LruCachedFunction[F]]:
- """A method decorator that applies a memoizing cache around the function.
-
- This is more-or-less a drop-in equivalent to functools.lru_cache, although note
- that the signature is slightly different.
-
- The main differences with functools.lru_cache are:
- (a) the size of the cache can be controlled via the cache_factor mechanism
- (b) the wrapped function can request a "cache_context" which provides a
- callback mechanism to indicate that the result is no longer valid
- (c) prometheus metrics are exposed automatically.
-
- The function should take zero or more arguments, which are used as the key for the
- cache. Single-argument functions use that argument as the cache key; otherwise the
- arguments are built into a tuple.
-
- Cached functions can be "chained" (i.e. a cached function can call other cached
- functions and get appropriately invalidated when they called caches are
- invalidated) by adding a special "cache_context" argument to the function
- and passing that as a kwarg to all caches called. For example:
-
- @lru_cache(cache_context=True)
- def foo(self, key, cache_context):
- r1 = self.bar1(key, on_invalidate=cache_context.invalidate)
- r2 = self.bar2(key, on_invalidate=cache_context.invalidate)
- return r1 + r2
-
- The wrapped function also has a 'cache' property which offers direct access to the
- underlying LruCache.
- """
-
- def func(orig: F) -> _LruCachedFunction[F]:
- desc = LruCacheDescriptor(
- orig,
- max_entries=max_entries,
- cache_context=cache_context,
- )
- return cast(_LruCachedFunction[F], desc)
-
- return func
-
-
-class LruCacheDescriptor(_CacheDescriptorBase):
- """Helper for @lru_cache"""
-
- class _Sentinel(enum.Enum):
- sentinel = object()
-
- def __init__(
- self,
- orig: Callable[..., Any],
- max_entries: int = 1000,
- cache_context: bool = False,
- ):
- super().__init__(
- orig, num_args=None, uncached_args=None, cache_context=cache_context
- )
- self.max_entries = max_entries
-
- def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
- cache: LruCache[CacheKey, Any] = LruCache(
- cache_name=self.orig.__name__,
- max_size=self.max_entries,
- )
-
- get_cache_key = self.cache_key_builder
- sentinel = LruCacheDescriptor._Sentinel.sentinel
-
- @functools.wraps(self.orig)
- def _wrapped(*args: Any, **kwargs: Any) -> Any:
- invalidate_callback = kwargs.pop("on_invalidate", None)
- callbacks = (invalidate_callback,) if invalidate_callback else ()
-
- cache_key = get_cache_key(args, kwargs)
-
- ret = cache.get(cache_key, default=sentinel, callbacks=callbacks)
- if ret != sentinel:
- return ret
-
- # Add our own `cache_context` to argument list if the wrapped function
- # has asked for one
- if self.add_cache_context:
- kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
-
- ret2 = self.orig(obj, *args, **kwargs)
- cache.set(cache_key, ret2, callbacks=callbacks)
-
- return ret2
-
- wrapped = cast(_CachedFunction, _wrapped)
- wrapped.cache = cache
- obj.__dict__[self.orig.__name__] = wrapped
-
- return wrapped
-
-
class DeferredCacheDescriptor(_CacheDescriptorBase):
"""A method decorator that applies a memoizing cache around the function.
@@ -301,12 +200,14 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
cache_context: bool = False,
iterable: bool = False,
prune_unread_entries: bool = True,
+ name: Optional[str] = None,
):
super().__init__(
orig,
num_args=num_args,
uncached_args=uncached_args,
cache_context=cache_context,
+ name=name,
)
if tree and self.num_args < 2:
@@ -321,7 +222,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
cache: DeferredCache[CacheKey, Any] = DeferredCache(
- name=self.orig.__name__,
+ name=self.name,
max_entries=self.max_entries,
tree=self.tree,
iterable=self.iterable,
@@ -358,7 +259,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
return make_deferred_yieldable(ret)
- wrapped = cast(_CachedFunction, _wrapped)
+ wrapped = cast(CachedFunction, _wrapped)
if self.num_args == 1:
assert not self.tree
@@ -372,7 +273,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
wrapped.cache = cache
wrapped.num_args = self.num_args
- obj.__dict__[self.orig.__name__] = wrapped
+ obj.__dict__[self.name] = wrapped
return wrapped
@@ -393,6 +294,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
cached_method_name: str,
list_name: str,
num_args: Optional[int] = None,
+ name: Optional[str] = None,
):
"""
Args:
@@ -403,7 +305,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
but including list_name) to use as cache keys. Defaults to all
named args of the function.
"""
- super().__init__(orig, num_args=num_args, uncached_args=None)
+ super().__init__(orig, num_args=num_args, uncached_args=None, name=name)
self.list_name = list_name
@@ -425,6 +327,12 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
cache: DeferredCache[CacheKey, Any] = cached_method.cache
num_args = cached_method.num_args
+ if num_args != self.num_args:
+ raise TypeError(
+ "Number of args (%s) does not match underlying cache_method_name=%s (%s)."
+ % (self.num_args, self.cached_method_name, num_args)
+ )
+
@functools.wraps(self.orig)
def wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Dict]":
# If we're passed a cache_context then we'll want to call its
@@ -435,16 +343,6 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
list_args = arg_dict[self.list_name]
- results = {}
-
- def update_results_dict(res: Any, arg: Hashable) -> None:
- results[arg] = res
-
- # list of deferreds to wait for
- cached_defers = []
-
- missing = set()
-
# If the cache takes a single arg then that is used as the key,
# otherwise a tuple is used.
if num_args == 1:
@@ -452,6 +350,9 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
def arg_to_cache_key(arg: Hashable) -> Hashable:
return arg
+ def cache_key_to_arg(key: tuple) -> Hashable:
+ return key
+
else:
keylist = list(keyargs)
@@ -459,58 +360,53 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
keylist[self.list_pos] = arg
return tuple(keylist)
- for arg in list_args:
- try:
- res = cache.get(arg_to_cache_key(arg), callback=invalidate_callback)
- if not res.called:
- res.addCallback(update_results_dict, arg)
- cached_defers.append(res)
- else:
- results[arg] = res.result
- except KeyError:
- missing.add(arg)
+ def cache_key_to_arg(key: tuple) -> Hashable:
+ return key[self.list_pos]
+
+ cache_keys = [arg_to_cache_key(arg) for arg in list_args]
+ immediate_results, pending_deferred, missing = cache.get_bulk(
+ cache_keys, callback=invalidate_callback
+ )
+
+ results = {cache_key_to_arg(key): v for key, v in immediate_results.items()}
+
+ cached_defers: List["defer.Deferred[Any]"] = []
+ if pending_deferred:
+
+ def update_results(r: Dict) -> None:
+ for k, v in r.items():
+ results[cache_key_to_arg(k)] = v
+
+ pending_deferred.addCallback(update_results)
+ cached_defers.append(pending_deferred)
if missing:
- # we need a deferred for each entry in the list,
- # which we put in the cache. Each deferred resolves with the
- # relevant result for that key.
- deferreds_map = {}
- for arg in missing:
- deferred: "defer.Deferred[Any]" = defer.Deferred()
- deferreds_map[arg] = deferred
- key = arg_to_cache_key(arg)
- cached_defers.append(
- cache.set(key, deferred, callback=invalidate_callback)
- )
+ cache_entry = cache.start_bulk_input(missing, invalidate_callback)
def complete_all(res: Dict[Hashable, Any]) -> None:
- # the wrapped function has completed. It returns a dict.
- # We can now update our own result map, and then resolve the
- # observable deferreds in the cache.
- for e, d1 in deferreds_map.items():
- val = res.get(e, None)
- # make sure we update the results map before running the
- # deferreds, because as soon as we run the last deferred, the
- # gatherResults() below will complete and return the result
- # dict to our caller.
- results[e] = val
- d1.callback(val)
+ missing_results = {}
+ for key in missing:
+ arg = cache_key_to_arg(key)
+ val = res.get(arg, None)
+
+ results[arg] = val
+ missing_results[key] = val
+
+ cache_entry.complete_bulk(cache, missing_results)
def errback_all(f: Failure) -> None:
- # the wrapped function has failed. Propagate the failure into
- # the cache, which will invalidate the entry, and cause the
- # relevant cached_deferreds to fail, which will propagate the
- # failure to our caller.
- for d1 in deferreds_map.values():
- d1.errback(f)
+ cache_entry.error_bulk(cache, missing, f)
args_to_call = dict(arg_dict)
- args_to_call[self.list_name] = missing
+ args_to_call[self.list_name] = {
+ cache_key_to_arg(key) for key in missing
+ }
# dispatch the call, and attach the two handlers
- defer.maybeDeferred(
+ missing_d = defer.maybeDeferred(
preserve_fn(self.orig), **args_to_call
).addCallbacks(complete_all, errback_all)
+ cached_defers.append(missing_d)
if cached_defers:
d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks(
@@ -525,7 +421,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
else:
return defer.succeed(results)
- obj.__dict__[self.orig.__name__] = wrapped
+ obj.__dict__[self.name] = wrapped
return wrapped
@@ -577,7 +473,8 @@ def cached(
cache_context: bool = False,
iterable: bool = False,
prune_unread_entries: bool = True,
-) -> Callable[[F], _CachedFunction[F]]:
+ name: Optional[str] = None,
+) -> Callable[[F], CachedFunction[F]]:
func = lambda orig: DeferredCacheDescriptor(
orig,
max_entries=max_entries,
@@ -587,21 +484,26 @@ def cached(
cache_context=cache_context,
iterable=iterable,
prune_unread_entries=prune_unread_entries,
+ name=name,
)
- return cast(Callable[[F], _CachedFunction[F]], func)
+ return cast(Callable[[F], CachedFunction[F]], func)
def cachedList(
- *, cached_method_name: str, list_name: str, num_args: Optional[int] = None
-) -> Callable[[F], _CachedFunction[F]]:
+ *,
+ cached_method_name: str,
+ list_name: str,
+ num_args: Optional[int] = None,
+ name: Optional[str] = None,
+) -> Callable[[F], CachedFunction[F]]:
"""Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`.
Used to do batch lookups for an already created cache. One of the arguments
is specified as a list that is iterated through to lookup keys in the
original cache. A new tuple consisting of the (deduplicated) keys that weren't in
the cache gets passed to the original function, which is expected to results
- in a map of key to value for each passed value. THe new results are stored in the
+ in a map of key to value for each passed value. The new results are stored in the
original cache. Note that any missing values are cached as None.
Args:
@@ -628,9 +530,10 @@ def cachedList(
cached_method_name=cached_method_name,
list_name=list_name,
num_args=num_args,
+ name=name,
)
- return cast(Callable[[F], _CachedFunction[F]], func)
+ return cast(Callable[[F], CachedFunction[F]], func)
def _get_cache_key_builder(
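
For illustration, the new `name=` argument lets a decorated method register its cache under an explicit name rather than the wrapped function's `__name__`; the store class and methods below are invented for the example:

    from typing import Dict, Iterable, Optional

    from synapse.util.caches.descriptors import cached, cachedList

    class ExampleStore:
        @cached(name="get_widget")
        async def get_widget(self, widget_id: str) -> Optional[str]:
            # Stand-in for a real single-key lookup.
            return None

        @cachedList(
            cached_method_name="get_widget", list_name="widget_ids", name="get_widgets"
        )
        async def get_widgets(
            self, widget_ids: Iterable[str]
        ) -> Dict[str, Optional[str]]:
            # Stand-in for a real batch lookup; missing values are cached as None.
            return {widget_id: None for widget_id in widget_ids}
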
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
index fa91479c97..5eaf70c7ab 100644
--- a/synapse/util/caches/dictionary_cache.py
+++ b/synapse/util/caches/dictionary_cache.py
@@ -169,10 +169,11 @@ class DictionaryCache(Generic[KT, DKT, DV]):
if it is in the cache.
Returns:
- DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry`
- will contain include the keys that are in the cache. If None then
- will either return the full dict if in the cache, or the empty
- dict (with `full` set to False) if it isn't.
+            If `dict_keys` is not None then `DictionaryEntry` will contain
+ the keys that are in the cache.
+
+ If None then will either return the full dict if in the cache, or the
+ empty dict (with `full` set to False) if it isn't.
"""
if dict_keys is None:
# The caller wants the full set of dictionary keys for this cache key
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index c6a5d0dfc0..01ad02af67 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -207,7 +207,7 @@ class ExpiringCache(Generic[KT, VT]):
items from the cache.
Returns:
- bool: Whether the cache changed size or not.
+ Whether the cache changed size or not.
"""
new_size = int(self._original_max_size * factor)
if new_size != self._max_size:
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index b3bdedb04c..dcf0eac3bf 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -389,11 +389,11 @@ class LruCache(Generic[KT, VT]):
cache_name: The name of this cache, for the prometheus metrics. If unset,
no metrics will be reported on this cache.
- cache_type (type):
+ cache_type:
type of underlying cache to be used. Typically one of dict
or TreeCache.
- size_callback (func(V) -> int | None):
+ size_callback:
metrics_collection_callback:
metrics collection callback. This is called early in the metrics
@@ -403,7 +403,7 @@ class LruCache(Generic[KT, VT]):
Ignored if cache_name is None.
- apply_cache_factor_from_config (bool): If true, `max_size` will be
+ apply_cache_factor_from_config: If true, `max_size` will be
multiplied by a cache factor derived from the homeserver config
clock:
@@ -796,7 +796,7 @@ class LruCache(Generic[KT, VT]):
items from the cache.
Returns:
- bool: Whether the cache changed size or not.
+ Whether the cache changed size or not.
"""
if not self.apply_cache_factor_from_config:
return False
@@ -834,9 +834,26 @@ class AsyncLruCache(Generic[KT, VT]):
) -> Optional[VT]:
return self._lru_cache.get(key, update_metrics=update_metrics)
+ async def get_external(
+ self,
+ key: KT,
+ default: Optional[T] = None,
+ update_metrics: bool = True,
+ ) -> Optional[VT]:
+        # This method should fetch from any configured external cache; in this case it is a no-op.
+ return None
+
+ def get_local(
+ self, key: KT, default: Optional[T] = None, update_metrics: bool = True
+ ) -> Optional[VT]:
+ return self._lru_cache.get(key, update_metrics=update_metrics)
+
async def set(self, key: KT, value: VT) -> None:
self._lru_cache.set(key, value)
+ def set_local(self, key: KT, value: VT) -> None:
+ self._lru_cache.set(key, value)
+
async def invalidate(self, key: KT) -> None:
# This method should invalidate any external cache and then invalidate the LruCache.
return self._lru_cache.invalidate(key)
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 330709b8b7..666f4b6895 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -72,7 +72,7 @@ class StreamChangeCache:
items from the cache.
Returns:
- bool: Whether the cache changed size or not.
+ Whether the cache changed size or not.
"""
new_size = math.floor(self._original_max_size * factor)
if new_size != self._max_size:
@@ -188,14 +188,8 @@ class StreamChangeCache:
self._entity_to_key[entity] = stream_pos
self._evict()
- # if the cache is too big, remove entries
- while len(self._cache) > self._max_size:
- k, r = self._cache.popitem(0)
- self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos)
- for entity in r:
- del self._entity_to_key[entity]
-
def _evict(self) -> None:
+ # if the cache is too big, remove entries
while len(self._cache) > self._max_size:
k, r = self._cache.popitem(0)
self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos)
@@ -203,7 +197,6 @@ class StreamChangeCache:
self._entity_to_key.pop(entity, None)
def get_max_pos_of_last_change(self, entity: EntityType) -> int:
-
"""Returns an upper bound of the stream id of the last change to an
entity.
"""
diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py
index c1b8ec0c73..fec31da2b6 100644
--- a/synapse/util/caches/treecache.py
+++ b/synapse/util/caches/treecache.py
@@ -135,6 +135,9 @@ class TreeCache:
def values(self):
return iterate_tree_cache_entry(self.root)
+ def items(self):
+ return iterate_tree_cache_items((), self.root)
+
def __len__(self) -> int:
return self.size
diff --git a/synapse/util/cancellation.py b/synapse/util/cancellation.py
new file mode 100644
index 0000000000..472d2e3aeb
--- /dev/null
+++ b/synapse/util/cancellation.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, TypeVar
+
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+def cancellable(function: F) -> F:
+ """Marks a function as cancellable.
+
+ Servlet methods with this decorator will be cancelled if the client disconnects before we
+ finish processing the request.
+
+ Although this annotation is particularly useful for servlet methods, it's also
+ useful for intermediate functions, where it documents the fact that the function has
+ been audited for cancellation safety and needs to preserve that.
+ This then simplifies auditing new functions that call those same intermediate
+ functions.
+
+ During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping
+ the method. The `cancel()` call will propagate down to the `Deferred` that is
+ currently being waited on. That `Deferred` will raise a `CancelledError`, which will
+ propagate up, as per normal exception handling.
+
+ Before applying this decorator to a new function, you MUST recursively check
+ that all `await`s in the function are on `async` functions or `Deferred`s that
+ handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from
+ premature logging context closure, to stuck requests, to database corruption.
+
+ See the documentation page on Cancellation for more information.
+
+ Usage:
+ class SomeServlet(RestServlet):
+ @cancellable
+ async def on_GET(self, request: SynapseRequest) -> ...:
+ ...
+ """
+
+ function.cancellable = True # type: ignore[attr-defined]
+ return function
+
+
+def is_function_cancellable(function: Callable[..., Any]) -> bool:
+ """Checks whether a servlet method has the `@cancellable` flag."""
+ return getattr(function, "cancellable", False)
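
A minimal sketch of how a dispatcher might consult the flag set by `@cancellable` (the servlet class and dispatch logic here are assumptions, not Synapse's actual request handling):

    from synapse.util.cancellation import cancellable, is_function_cancellable

    class ExampleServlet:
        @cancellable
        async def on_GET(self, request: object) -> None:
            # Handle the request; safe to cancel if the client disconnects.
            ...

    servlet = ExampleServlet()
    if is_function_cancellable(servlet.on_GET):
        # The caller may call .cancel() on the Deferred wrapping this handler.
        pass
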
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index 66f1da7502..3b1e205700 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -66,6 +66,21 @@ def _is_dev_dependency(req: Requirement) -> bool:
)
+def _should_ignore_runtime_requirement(req: Requirement) -> bool:
+ # This is a build-time dependency. Irritatingly, `poetry build` ignores the
+ # requirements listed in the [build-system] section of pyproject.toml, so in order
+ # to support `poetry install --no-dev` we have to mark it as a runtime dependency.
+ # See discussion on https://github.com/python-poetry/poetry/issues/6154 (it sounds
+ # like the poetry authors don't consider this a bug?)
+ #
+    # In any case, work around this by ignoring setuptools_rust here. (It might be
+    # slightly cleaner to put `setuptools_rust` in a `build` extra or similar, but for
+    # now let's do something quick and dirty.)
+ if req.name == "setuptools_rust":
+ return True
+ return False
+
+
class Dependency(NamedTuple):
requirement: Requirement
must_be_installed: bool
@@ -77,7 +92,7 @@ def _generic_dependencies() -> Iterable[Dependency]:
assert requirements is not None
for raw_requirement in requirements:
req = Requirement(raw_requirement)
- if _is_dev_dependency(req):
+ if _is_dev_dependency(req) or _should_ignore_runtime_requirement(req):
continue
# https://packaging.pypa.io/en/latest/markers.html#usage notes that
diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py
index df77edcce2..5df03d3ddc 100644
--- a/synapse/util/macaroons.py
+++ b/synapse/util/macaroons.py
@@ -24,7 +24,7 @@ from typing_extensions import Literal
from synapse.util import Clock, stringutils
-MacaroonType = Literal["access", "delete_pusher", "session", "login"]
+MacaroonType = Literal["access", "delete_pusher", "session"]
def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
@@ -111,19 +111,6 @@ class OidcSessionData:
"""The session ID of the ongoing UI Auth ("" if this is a login)"""
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class LoginTokenAttributes:
- """Data we store in a short-term login token"""
-
- user_id: str
-
- auth_provider_id: str
- """The SSO Identity Provider that the user authenticated with, to get this token."""
-
- auth_provider_session_id: Optional[str]
- """The session ID advertised by the SSO Identity Provider."""
-
-
class MacaroonGenerator:
def __init__(self, clock: Clock, location: str, secret_key: bytes):
self._clock = clock
@@ -165,35 +152,6 @@ class MacaroonGenerator:
macaroon.add_first_party_caveat(f"pushkey = {pushkey}")
return macaroon.serialize()
- def generate_short_term_login_token(
- self,
- user_id: str,
- auth_provider_id: str,
- auth_provider_session_id: Optional[str] = None,
- duration_in_ms: int = (2 * 60 * 1000),
- ) -> str:
- """Generate a short-term login token used during SSO logins
-
- Args:
- user_id: The user for which the token is valid.
- auth_provider_id: The SSO IdP the user used.
- auth_provider_session_id: The session ID got during login from the SSO IdP.
-
- Returns:
- A signed token valid for using as a ``m.login.token`` token.
- """
- now = self._clock.time_msec()
- expiry = now + duration_in_ms
- macaroon = self._generate_base_macaroon("login")
- macaroon.add_first_party_caveat(f"user_id = {user_id}")
- macaroon.add_first_party_caveat(f"time < {expiry}")
- macaroon.add_first_party_caveat(f"auth_provider_id = {auth_provider_id}")
- if auth_provider_session_id is not None:
- macaroon.add_first_party_caveat(
- f"auth_provider_session_id = {auth_provider_session_id}"
- )
- return macaroon.serialize()
-
def generate_oidc_session_token(
self,
state: str,
@@ -233,49 +191,6 @@ class MacaroonGenerator:
return macaroon.serialize()
- def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes:
- """Verify a short-term-login macaroon
-
- Checks that the given token is a valid, unexpired short-term-login token
- minted by this server.
-
- Args:
- token: The login token to verify.
-
- Returns:
- A set of attributes carried by this token, including the
- ``user_id`` and informations about the SSO IDP used during that
- login.
-
- Raises:
- MacaroonVerificationFailedException if the verification failed
- """
- macaroon = pymacaroons.Macaroon.deserialize(token)
-
- v = self._base_verifier("login")
- v.satisfy_general(lambda c: c.startswith("user_id = "))
- v.satisfy_general(lambda c: c.startswith("auth_provider_id = "))
- v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = "))
- satisfy_expiry(v, self._clock.time_msec)
- v.verify(macaroon, self._secret_key)
-
- user_id = get_value_from_macaroon(macaroon, "user_id")
- auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id")
-
- auth_provider_session_id: Optional[str] = None
- try:
- auth_provider_session_id = get_value_from_macaroon(
- macaroon, "auth_provider_session_id"
- )
- except MacaroonVerificationFailedException:
- pass
-
- return LoginTokenAttributes(
- user_id=user_id,
- auth_provider_id=auth_provider_id,
- auth_provider_session_id=auth_provider_session_id,
- )
-
def verify_guest_token(self, token: str) -> str:
"""Verify a guest access token macaroon
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index bc3b4938ea..165480bdbe 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -15,9 +15,9 @@
import logging
from functools import wraps
from types import TracebackType
-from typing import Awaitable, Callable, Optional, Type, TypeVar
+from typing import Awaitable, Callable, Dict, Generator, Optional, Type, TypeVar
-from prometheus_client import Counter
+from prometheus_client import CollectorRegistry, Counter, Metric
from typing_extensions import Concatenate, ParamSpec, Protocol
from synapse.logging.context import (
@@ -208,3 +208,33 @@ class Measure:
metrics.real_time_sum += duration
# TODO: Add other in flight metrics.
+
+
+class DynamicCollectorRegistry(CollectorRegistry):
+ """
+ Custom Prometheus Collector registry that calls a hook first, allowing you
+ to update metrics on-demand.
+
+ Don't forget to register this registry with the main registry!
+ """
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._pre_update_hooks: Dict[str, Callable[[], None]] = {}
+
+ def collect(self) -> Generator[Metric, None, None]:
+ """
+ Collects metrics, calling pre-update hooks first.
+ """
+
+ for pre_update_hook in self._pre_update_hooks.values():
+ pre_update_hook()
+
+ yield from super().collect()
+
+ def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None:
+ """
+ Registers a hook that is called before metric collection.
+ """
+
+ self._pre_update_hooks[metric_name] = hook
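
A minimal sketch of how the registry might be used (the gauge name, queue object and hook are illustrative assumptions):

    from prometheus_client import REGISTRY, Gauge

    from synapse.util.metrics import DynamicCollectorRegistry

    registry = DynamicCollectorRegistry()

    some_queue: list = []  # stand-in for whatever we want to measure
    queue_size = Gauge("example_queue_size", "Items currently queued", registry=registry)

    def _update_queue_size() -> None:
        queue_size.set(len(some_queue))

    # The hook runs each time the registry is scraped, just before collection.
    registry.register_hook("example_queue_size", _update_queue_size)

    # As the docstring says: register this registry with the main registry.
    REGISTRY.register(registry)
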
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 6394cc39ac..2aceb1a47f 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -15,8 +15,23 @@
import collections
import contextlib
import logging
+import threading
import typing
-from typing import Any, DefaultDict, Iterator, List, Set
+from typing import (
+ Any,
+ Callable,
+ DefaultDict,
+ Dict,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+)
+
+from prometheus_client.core import Counter
+from typing_extensions import ContextManager
from twisted.internet import defer
@@ -27,6 +42,8 @@ from synapse.logging.context import (
make_deferred_yieldable,
run_in_background,
)
+from synapse.logging.opentracing import start_active_span
+from synapse.metrics import Histogram, LaterGauge
from synapse.util import Clock
if typing.TYPE_CHECKING:
@@ -35,15 +52,127 @@ if typing.TYPE_CHECKING:
logger = logging.getLogger(__name__)
+# Track how much the ratelimiter is affecting requests
+rate_limit_sleep_counter = Counter(
+ "synapse_rate_limit_sleep",
+ "Number of requests slept by the rate limiter",
+ ["rate_limiter_name"],
+)
+rate_limit_reject_counter = Counter(
+ "synapse_rate_limit_reject",
+ "Number of requests rejected by the rate limiter",
+ ["rate_limiter_name"],
+)
+queue_wait_timer = Histogram(
+ "synapse_rate_limit_queue_wait_time_seconds",
+ "Amount of time spent waiting for the rate limiter to let our request through.",
+ ["rate_limiter_name"],
+ buckets=(
+ 0.005,
+ 0.01,
+ 0.025,
+ 0.05,
+ 0.1,
+ 0.25,
+ 0.5,
+ 0.75,
+ 1.0,
+ 2.5,
+ 5.0,
+ 10.0,
+ 20.0,
+ "+Inf",
+ ),
+)
+
+
+_rate_limiter_instances: Set["FederationRateLimiter"] = set()
+# Protects the _rate_limiter_instances set from concurrent access
+_rate_limiter_instances_lock = threading.Lock()
+
+
+def _get_counts_from_rate_limiter_instance(
+ count_func: Callable[["FederationRateLimiter"], int]
+) -> Mapping[Tuple[str, ...], int]:
+ """Returns a count of something (slept/rejected hosts) by (metrics_name)"""
+ # Cast to a list to prevent it changing while the Prometheus
+ # thread is collecting metrics
+ with _rate_limiter_instances_lock:
+ rate_limiter_instances = list(_rate_limiter_instances)
+
+ # Map from (metrics_name,) -> int, the number of something like slept hosts
+ # or rejected hosts. The key type is Tuple[str], but we leave the length
+    # unspecified for compatibility with LaterGauge's annotations.
+ counts: Dict[Tuple[str, ...], int] = {}
+ for rate_limiter_instance in rate_limiter_instances:
+ # Only track metrics if they provided a `metrics_name` to
+ # differentiate this instance of the rate limiter.
+ if rate_limiter_instance.metrics_name:
+ key = (rate_limiter_instance.metrics_name,)
+ counts[key] = count_func(rate_limiter_instance)
+
+ return counts
+
+
+# We track the number of affected hosts per time-period so we can
+# differentiate one really noisy homeserver from a general
+# ratelimit tuning problem across the federation.
+LaterGauge(
+ "synapse_rate_limit_sleep_affected_hosts",
+ "Number of hosts that had requests put to sleep",
+ ["rate_limiter_name"],
+ lambda: _get_counts_from_rate_limiter_instance(
+ lambda rate_limiter_instance: sum(
+ ratelimiter.should_sleep()
+ for ratelimiter in rate_limiter_instance.ratelimiters.values()
+ )
+ ),
+)
+LaterGauge(
+ "synapse_rate_limit_reject_affected_hosts",
+ "Number of hosts that had requests rejected",
+ ["rate_limiter_name"],
+ lambda: _get_counts_from_rate_limiter_instance(
+ lambda rate_limiter_instance: sum(
+ ratelimiter.should_reject()
+ for ratelimiter in rate_limiter_instance.ratelimiters.values()
+ )
+ ),
+)
+
+
class FederationRateLimiter:
- def __init__(self, clock: Clock, config: FederationRatelimitSettings):
+ """Used to rate limit request per-host."""
+
+ def __init__(
+ self,
+ clock: Clock,
+ config: FederationRatelimitSettings,
+ metrics_name: Optional[str] = None,
+ ):
+ """
+ Args:
+ clock
+ config
+ metrics_name: The name of the rate limiter so we can differentiate it
+ from the rest in the metrics. If `None`, we don't track metrics
+ for this rate limiter.
+
+ """
+ self.metrics_name = metrics_name
+
def new_limiter() -> "_PerHostRatelimiter":
- return _PerHostRatelimiter(clock=clock, config=config)
+ return _PerHostRatelimiter(
+ clock=clock, config=config, metrics_name=metrics_name
+ )
self.ratelimiters: DefaultDict[
str, "_PerHostRatelimiter"
] = collections.defaultdict(new_limiter)
+ with _rate_limiter_instances_lock:
+ _rate_limiter_instances.add(self)
+
def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None]]":
"""Used to ratelimit an incoming request from a given host
@@ -54,22 +183,32 @@ class FederationRateLimiter:
# Handle request ...
Args:
- host (str): Origin of incoming request.
+ host: Origin of incoming request.
Returns:
context manager which returns a deferred.
"""
- return self.ratelimiters[host].ratelimit()
+ return self.ratelimiters[host].ratelimit(host)
class _PerHostRatelimiter:
- def __init__(self, clock: Clock, config: FederationRatelimitSettings):
+ def __init__(
+ self,
+ clock: Clock,
+ config: FederationRatelimitSettings,
+ metrics_name: Optional[str] = None,
+ ):
"""
Args:
clock
config
+ metrics_name: The name of the rate limiter so we can differentiate it
+ from the rest in the metrics. If `None`, we don't track metrics
+ for this rate limiter.
"""
self.clock = clock
+ self.metrics_name = metrics_name
self.window_size = config.window_size
self.sleep_limit = config.sleep_limit
@@ -94,19 +233,45 @@ class _PerHostRatelimiter:
self.request_times: List[int] = []
@contextlib.contextmanager
- def ratelimit(self) -> "Iterator[defer.Deferred[None]]":
+ def ratelimit(self, host: str) -> "Iterator[defer.Deferred[None]]":
# `contextlib.contextmanager` takes a generator and turns it into a
# context manager. The generator should only yield once with a value
# to be returned by manager.
# Exceptions will be reraised at the yield.
+ self.host = host
+
request_id = object()
- ret = self._on_enter(request_id)
+ # Ideally we'd use `Deferred.fromCoroutine()` here, to save on redundant
+ # type-checking, but we'd need Twisted >= 21.2.
+ ret = defer.ensureDeferred(self._on_enter_with_tracing(request_id))
try:
yield ret
finally:
self._on_exit(request_id)
+ def should_reject(self) -> bool:
+ """
+ Whether to reject the request if we already have too many queued up
+ (either sleeping or in the ready queue).
+ """
+ queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
+ return queue_size > self.reject_limit
+
+ def should_sleep(self) -> bool:
+ """
+ Whether to sleep the request if we already have too many requests coming
+ through within the window.
+ """
+ return len(self.request_times) > self.sleep_limit
+
+ async def _on_enter_with_tracing(self, request_id: object) -> None:
+ maybe_metrics_cm: ContextManager = contextlib.nullcontext()
+ if self.metrics_name:
+ maybe_metrics_cm = queue_wait_timer.labels(self.metrics_name).time()
+ with start_active_span("ratelimit wait"), maybe_metrics_cm:
+ await self._on_enter(request_id)
+
def _on_enter(self, request_id: object) -> "defer.Deferred[None]":
time_now = self.clock.time_msec()
@@ -117,8 +282,10 @@ class _PerHostRatelimiter:
# reject the request if we already have too many queued up (either
# sleeping or in the ready queue).
- queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
- if queue_size > self.reject_limit:
+ if self.should_reject():
+ logger.debug("Ratelimiter(%s): rejecting request", self.host)
+ if self.metrics_name:
+ rate_limit_reject_counter.labels(self.metrics_name).inc()
raise LimitExceededError(
retry_after_ms=int(self.window_size / self.sleep_limit)
)
@@ -130,7 +297,8 @@ class _PerHostRatelimiter:
queue_defer: defer.Deferred[None] = defer.Deferred()
self.ready_request_queue[request_id] = queue_defer
logger.info(
- "Ratelimiter: queueing request (queue now %i items)",
+ "Ratelimiter(%s): queueing request (queue now %i items)",
+ self.host,
len(self.ready_request_queue),
)
@@ -139,19 +307,29 @@ class _PerHostRatelimiter:
return defer.succeed(None)
logger.debug(
- "Ratelimit [%s]: len(self.request_times)=%d",
+ "Ratelimit(%s) [%s]: len(self.request_times)=%d",
+ self.host,
id(request_id),
len(self.request_times),
)
- if len(self.request_times) > self.sleep_limit:
- logger.debug("Ratelimiter: sleeping request for %f sec", self.sleep_sec)
+ if self.should_sleep():
+ logger.debug(
+ "Ratelimiter(%s) [%s]: sleeping request for %f sec",
+ self.host,
+ id(request_id),
+ self.sleep_sec,
+ )
+ if self.metrics_name:
+ rate_limit_sleep_counter.labels(self.metrics_name).inc()
ret_defer = run_in_background(self.clock.sleep, self.sleep_sec)
self.sleeping_requests.add(request_id)
def on_wait_finished(_: Any) -> "defer.Deferred[None]":
- logger.debug("Ratelimit [%s]: Finished sleeping", id(request_id))
+ logger.debug(
+ "Ratelimit(%s) [%s]: Finished sleeping", self.host, id(request_id)
+ )
self.sleeping_requests.discard(request_id)
queue_defer = queue_request()
return queue_defer
@@ -161,7 +339,9 @@ class _PerHostRatelimiter:
ret_defer = queue_request()
def on_start(r: object) -> object:
- logger.debug("Ratelimit [%s]: Processing req", id(request_id))
+ logger.debug(
+ "Ratelimit(%s) [%s]: Processing req", self.host, id(request_id)
+ )
self.current_processing.add(request_id)
return r
@@ -183,7 +363,7 @@ class _PerHostRatelimiter:
return make_deferred_yieldable(ret_defer)
def _on_exit(self, request_id: object) -> None:
- logger.debug("Ratelimit [%s]: Processed req", id(request_id))
+ logger.debug("Ratelimit(%s) [%s]: Processed req", self.host, id(request_id))
self.current_processing.discard(request_id)
try:
# start processing the next item on the queue.
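
Illustrative only: wiring a limiter up with the new `metrics_name`, which labels the counters, histogram and gauges added above. The origin host is a placeholder, and default-constructing `FederationRatelimitSettings` is an assumption of this sketch:

    from synapse.config.ratelimiting import FederationRatelimitSettings
    from synapse.util import Clock
    from synapse.util.ratelimitutils import FederationRateLimiter

    async def handle_incoming(clock: Clock, origin: str) -> None:
        limiter = FederationRateLimiter(
            clock,
            FederationRatelimitSettings(),
            metrics_name="example_federation",
        )
        # The context manager yields a Deferred to wait on before proceeding;
        # sleeps and rejections are now counted under "example_federation".
        with limiter.ratelimit(origin) as wait_deferred:
            await wait_deferred
            # ... handle the request ...
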
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index d0a69ff843..dcc037b982 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -51,7 +51,7 @@ class NotRetryingDestination(Exception):
destination: the domain in question
"""
- msg = "Not retrying server %s." % (destination,)
+ msg = f"Not retrying server {destination} because we tried it recently retry_last_ts={retry_last_ts} and we won't check for another retry_interval={retry_interval}ms."
super().__init__(msg)
self.retry_last_ts = retry_last_ts
diff --git a/synapse/util/rust.py b/synapse/util/rust.py
new file mode 100644
index 0000000000..30ecb9ffd9
--- /dev/null
+++ b/synapse/util/rust.py
@@ -0,0 +1,84 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+from hashlib import blake2b
+
+import synapse
+from synapse.synapse_rust import get_rust_file_digest
+
+
+def check_rust_lib_up_to_date() -> None:
+ """For editable installs check if the rust library is outdated and needs to
+ be rebuilt.
+ """
+
+ if not _dist_is_editable():
+ return
+
+ synapse_dir = os.path.dirname(synapse.__file__)
+ synapse_root = os.path.abspath(os.path.join(synapse_dir, ".."))
+
+ # Double check we've not gone into site-packages...
+ if os.path.basename(synapse_root) == "site-packages":
+ return
+
+ # ... and it looks like the root of a python project.
+ if not os.path.exists("pyproject.toml"):
+ return
+
+ # Get the hash of all Rust source files
+ hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src"))
+
+ if hash != get_rust_file_digest():
+ raise Exception("Rust module outdated. Please rebuild using `poetry install`")
+
+
+def _hash_rust_files_in_directory(directory: str) -> str:
+ """Get the hash of all files in a directory (recursively)"""
+
+ directory = os.path.abspath(directory)
+
+ paths = []
+
+ dirs = [directory]
+ while dirs:
+ dir = dirs.pop()
+ with os.scandir(dir) as d:
+ for entry in d:
+ if entry.is_dir():
+ dirs.append(entry.path)
+ else:
+ paths.append(entry.path)
+
+ # We sort to make sure that we get a consistent and well-defined ordering.
+ paths.sort()
+
+ hasher = blake2b()
+
+ for path in paths:
+ with open(os.path.join(directory, path), "rb") as f:
+ hasher.update(f.read())
+
+ return hasher.hexdigest()
+
+
+def _dist_is_editable() -> bool:
+ """Is distribution an editable install?"""
+ for path_item in sys.path:
+ egg_link = os.path.join(path_item, "matrix-synapse.egg-link")
+ if os.path.isfile(egg_link):
+ return True
+ return False
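
A standalone sketch of the digest idea used by `_hash_rust_files_in_directory`: hash every file under a directory in a stable order with blake2b. This is independent of the Synapse helpers, and the function name is illustrative:

    import os
    from hashlib import blake2b

    def digest_directory(directory: str) -> str:
        paths = []
        for root, _dirs, files in os.walk(directory):
            for name in files:
                paths.append(os.path.join(root, name))

        # Sort so the digest is reproducible regardless of filesystem order.
        paths.sort()

        hasher = blake2b()
        for path in paths:
            with open(path, "rb") as f:
                hasher.update(f.read())

        return hasher.hexdigest()
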
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 27a363d7e5..4961fe9313 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -86,7 +86,7 @@ def parse_server_name(server_name: str) -> Tuple[str, Optional[int]]:
ValueError if the server name could not be parsed.
"""
try:
- if server_name[-1] == "]":
+ if server_name and server_name[-1] == "]":
# ipv6 literal, hopefully
return server_name, None
@@ -123,7 +123,7 @@ def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int]
# that nobody is sneaking IP literals in that look like hostnames, etc.
# look for ipv6 literals
- if host[0] == "[":
+ if host and host[0] == "[":
if host[-1] != "]":
raise ValueError("Mismatched [...] in server name '%s'" % (server_name,))
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index 1e9c2faa64..54bc7589fd 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -48,7 +48,7 @@ async def check_3pid_allowed(
registration: whether we want to bind the 3PID as part of registering a new user.
Returns:
- bool: whether the 3PID medium/address is allowed to be added to this HS
+ whether the 3PID medium/address is allowed to be added to this HS
"""
if not await hs.get_password_auth_provider().is_3pid_allowed(
medium, address, registration
diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py
index 177e198e7e..b1ec7f4bd8 100644
--- a/synapse/util/wheel_timer.py
+++ b/synapse/util/wheel_timer.py
@@ -90,10 +90,10 @@ class WheelTimer(Generic[T]):
"""Fetch any objects that have timed out
Args:
- now (ms): Current time in msec
+ now: Current time in msec
Returns:
- list: List of objects that have timed out
+ List of objects that have timed out
"""
now_key = int(now / self.bucket_size)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 9abbaa5a64..b443857571 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event
+from synapse.logging.opentracing import trace
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter
@@ -51,6 +52,7 @@ MEMBERSHIP_PRIORITY = (
_HISTORY_VIS_KEY: Final[Tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "")
+@trace
async def filter_events_for_client(
storage: StorageControllers,
user_id: str,
@@ -71,8 +73,8 @@ async def filter_events_for_client(
* the user is not currently a member of the room, and:
* the user has not been a member of the room since the given
events
- always_include_ids: set of event ids to specifically
- include (unless sender is ignored)
+ always_include_ids: set of event ids to specifically include, if present
+ in events (unless sender is ignored)
filter_send_to_client: Whether we're checking an event that's going to be
sent to a client. This might not always be the case since this function can
also be called to check whether a user can see the state at a given point.
@@ -82,7 +84,15 @@ async def filter_events_for_client(
"""
# Filter out events that have been soft failed so that we don't relay them
# to clients.
+ events_before_filtering = events
events = [e for e in events if not e.internal_metadata.is_soft_failed()]
+ if len(events_before_filtering) != len(events):
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug(
+ "filter_events_for_client: Filtered out soft-failed events: Before=%s, After=%s",
+ [event.event_id for event in events_before_filtering],
+ [event.event_id for event in events],
+ )
types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id))
@@ -160,6 +170,10 @@ async def filter_event_for_clients_with_state(
if event.internal_metadata.is_soft_failed():
return []
+ # Fast path if we don't have any user IDs to check.
+ if not user_ids:
+ return ()
+
# Make a set for all user IDs that haven't been filtered out by a check.
allowed_user_ids = set(user_ids)
@@ -295,6 +309,10 @@ def _check_client_allowed_to_see_event(
_check_filter_send_to_client(event, clock, retention_policy, sender_ignored)
== _CheckFilter.DENIED
):
+ logger.debug(
+ "_check_client_allowed_to_see_event(event=%s): Filtered out event because `_check_filter_send_to_client` returned `_CheckFilter.DENIED`",
+ event.event_id,
+ )
return None
if event.event_id in always_include_ids:
@@ -306,9 +324,17 @@ def _check_client_allowed_to_see_event(
# for out-of-band membership events (eg, incoming invites, or rejections of
# said invite) for the user themselves.
if event.type == EventTypes.Member and event.state_key == user_id:
- logger.debug("Returning out-of-band-membership event %s", event)
+ logger.debug(
+ "_check_client_allowed_to_see_event(event=%s): Returning out-of-band-membership event %s",
+ event.event_id,
+ event,
+ )
return event
+ logger.debug(
+ "_check_client_allowed_to_see_event(event=%s): Filtered out event because it's an outlier",
+ event.event_id,
+ )
return None
if state is None:
@@ -331,11 +357,21 @@ def _check_client_allowed_to_see_event(
membership_result = _check_membership(user_id, event, visibility, state, is_peeking)
if not membership_result.allowed:
+ logger.debug(
+ "_check_client_allowed_to_see_event(event=%s): Filtered out event because the user can't see the event because of their membership, membership_result.allowed=%s membership_result.joined=%s",
+ event.event_id,
+ membership_result.allowed,
+ membership_result.joined,
+ )
return None
# If the sender has been erased and the user was not joined at the time, we
# must only return the redacted form.
if sender_erased and not membership_result.joined:
+ logger.debug(
+ "_check_client_allowed_to_see_event(event=%s): Returning pruned event because `sender_erased` and the user was not joined at the time",
+ event.event_id,
+ )
event = prune_event(event)
return event
@@ -527,7 +563,8 @@ def get_effective_room_visibility_from_state(state: StateMap[EventBase]) -> str:
async def filter_events_for_server(
storage: StorageControllers,
- server_name: str,
+ target_server_name: str,
+ local_server_name: str,
events: List[EventBase],
redact: bool = True,
check_history_visibility_only: bool = False,
@@ -567,7 +604,7 @@ async def filter_events_for_server(
# if the server is either in the room or has been invited
# into the room.
for ev in memberships.values():
- assert get_domain_from_id(ev.state_key) == server_name
+ assert get_domain_from_id(ev.state_key) == target_server_name
memtype = ev.membership
if memtype == Membership.JOIN:
@@ -586,6 +623,24 @@ async def filter_events_for_server(
# to no users having been erased.
erased_senders = {}
+ # Filter out non-local events when we are in the middle of a partial join, since our servers
+ # list can be out of date and we could leak events to servers not in the room anymore.
+ # This can also be true for local events but we consider it to be an acceptable risk.
+
+ # We do this check as a first step and before retrieving membership events because
+ # otherwise a room could be fully joined after we retrieve those, which would then bypass
+ # this check but would base the filtering on an outdated view of the membership events.
+
+ partial_state_invisible_events = set()
+ if not check_history_visibility_only:
+ for e in events:
+ sender_domain = get_domain_from_id(e.sender)
+ if (
+ sender_domain != local_server_name
+ and await storage.main.is_partial_state_room(e.room_id)
+ ):
+ partial_state_invisible_events.add(e)
+
# Let's check to see if all the events have a history visibility
# of "shared" or "world_readable". If that's the case then we don't
# need to check membership (as we know the server is in the room).
@@ -600,7 +655,7 @@ async def filter_events_for_server(
if event_to_history_vis[e.event_id]
not in (HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
],
- server_name,
+ target_server_name,
)
to_return = []
@@ -609,6 +664,10 @@ async def filter_events_for_server(
visible = check_event_is_visible(
event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
)
+
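+        # Enforce the partial-state restriction computed above: while our view of the
+        # room's membership is partial, events from remote senders must not be exposed
+        # to the requesting server.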
+ if e in partial_state_invisible_events:
+ visible = False
+
if visible and not erased:
to_return.append(e)
elif redact:
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index dfcfaf79b6..e0f363555b 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -284,10 +284,13 @@ class AuthTestCase(unittest.HomeserverTestCase):
TokenLookupResult(
user_id="@baldrick:matrix.org",
device_id="device",
+ token_id=5,
token_owner="@admin:matrix.org",
+ token_used=True,
)
)
self.store.insert_client_ip = simple_async_mock(None)
+ self.store.mark_access_token_as_used = simple_async_mock(None)
request = Mock(args={})
request.getClientAddress.return_value.host = "127.0.0.1"
request.args[b"access_token"] = [self.test_token]
@@ -301,10 +304,13 @@ class AuthTestCase(unittest.HomeserverTestCase):
TokenLookupResult(
user_id="@baldrick:matrix.org",
device_id="device",
+ token_id=5,
token_owner="@admin:matrix.org",
+ token_used=True,
)
)
self.store.insert_client_ip = simple_async_mock(None)
+ self.store.mark_access_token_as_used = simple_async_mock(None)
request = Mock(args={})
request.getClientAddress.return_value.host = "127.0.0.1"
request.args[b"access_token"] = [self.test_token]
@@ -347,7 +353,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
serialized = macaroon.serialize()
user_info = self.get_success(self.auth.get_user_by_access_token(serialized))
- self.assertEqual(user_id, user_info.user_id)
+ self.assertEqual(user_id, user_info.user.to_string())
self.assertTrue(user_info.is_guest)
self.store.get_user_by_id.assert_called_with(user_id)
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index a269c477fb..d5524d296e 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -35,6 +35,8 @@ def MockEvent(**kwargs):
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
+ if "content" not in kwargs:
+ kwargs["content"] = {}
return make_event_from_dict(kwargs)
@@ -44,19 +46,36 @@ class FilteringTestCase(unittest.HomeserverTestCase):
self.datastore = hs.get_datastores().main
def test_errors_on_invalid_filters(self):
+ # See USER_FILTER_SCHEMA for the filter schema.
invalid_filters = [
- {"boom": {}},
+ # `account_data` must be a dictionary
{"account_data": "Hello World"},
+ # `event_fields` entries must not contain backslashes
{"event_fields": [r"\\foo"]},
- {"room": {"timeline": {"limit": 0}, "state": {"not_bars": ["*"]}}},
+ # `event_format` must be "client" or "federation"
{"event_format": "other"},
+ # `not_rooms` must contain valid room IDs
{"room": {"not_rooms": ["#foo:pik-test"]}},
+ # `senders` must contain valid user IDs
{"presence": {"senders": ["@bar;pik.test.com"]}},
]
for filter in invalid_filters:
with self.assertRaises(SynapseError):
self.filtering.check_valid_filter(filter)
+ def test_ignores_unknown_filter_fields(self):
+ # For forward compatibility, we must ignore unknown filter fields.
+ # See USER_FILTER_SCHEMA for the filter schema.
+ filters = [
+ {"org.matrix.msc9999.future_option": True},
+ {"presence": {"org.matrix.msc9999.future_option": True}},
+ {"room": {"org.matrix.msc9999.future_option": True}},
+ {"room": {"timeline": {"org.matrix.msc9999.future_option": True}}},
+ ]
+ for filter in filters:
+ self.filtering.check_valid_filter(filter)
+ # Must not raise.
+
def test_valid_filters(self):
valid_filters = [
{
@@ -357,6 +376,66 @@ class FilteringTestCase(unittest.HomeserverTestCase):
self.assertTrue(Filter(self.hs, definition)._check(event))
+ @unittest.override_config({"experimental_features": {"msc3874_enabled": True}})
+ def test_filter_rel_type(self):
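+        """Only events whose relation `rel_type` matches the filter should be included."""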
+ definition = {"org.matrix.msc3874.rel_types": ["m.thread"]}
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={},
+ )
+
+ self.assertFalse(Filter(self.hs, definition)._check(event))
+
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={"m.relates_to": {"event_id": "$abc", "rel_type": "m.reference"}},
+ )
+
+ self.assertFalse(Filter(self.hs, definition)._check(event))
+
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={"m.relates_to": {"event_id": "$abc", "rel_type": "m.thread"}},
+ )
+
+ self.assertTrue(Filter(self.hs, definition)._check(event))
+
+ @unittest.override_config({"experimental_features": {"msc3874_enabled": True}})
+ def test_filter_not_rel_type(self):
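+        """Events whose relation `rel_type` appears in `not_rel_types` should be excluded."""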
+ definition = {"org.matrix.msc3874.not_rel_types": ["m.thread"]}
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={"m.relates_to": {"event_id": "$abc", "rel_type": "m.thread"}},
+ )
+
+ self.assertFalse(Filter(self.hs, definition)._check(event))
+
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={},
+ )
+
+ self.assertTrue(Filter(self.hs, definition)._check(event))
+
+ event = MockEvent(
+ sender="@foo:bar",
+ type="m.room.message",
+ room_id="!secretbase:unknown",
+ content={"m.relates_to": {"event_id": "$abc", "rel_type": "m.reference"}},
+ )
+
+ self.assertTrue(Filter(self.hs, definition)._check(event))
+
def test_filter_presence_match(self):
user_filter_json = {"presence": {"types": ["m.*"]}}
filter_id = self.get_success(
@@ -456,7 +535,6 @@ class FilteringTestCase(unittest.HomeserverTestCase):
self.assertEqual(filtered_room_ids, ["!allowed:example.com"])
- @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
def test_filter_relations(self):
events = [
# An event without a relation.
diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py
index 264e101082..8d03da7f96 100644
--- a/tests/app/test_openid_listener.py
+++ b/tests/app/test_openid_listener.py
@@ -61,7 +61,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase):
}
# Listen with the config
- self.hs._listen_http(parse_listener_def(config))
+ self.hs._listen_http(parse_listener_def(0, config))
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
@@ -79,7 +79,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase):
self.assertEqual(channel.code, 401)
-@patch("synapse.app.homeserver.KeyApiV2Resource", new=Mock())
+@patch("synapse.app.homeserver.KeyResource", new=Mock())
class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
@@ -109,7 +109,7 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
}
# Listen with the config
- self.hs._listener_http(self.hs.config, parse_listener_def(config))
+ self.hs._listener_http(self.hs.config, parse_listener_def(0, config))
# Grab the resource from the site that was told to listen
site = self.reactor.tcpServers[0][1]
diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py
index 532b676365..89ee79396f 100644
--- a/tests/appservice/test_api.py
+++ b/tests/appservice/test_api.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, List, Mapping
+from typing import Any, List, Mapping, Sequence, Union
from unittest.mock import Mock
from twisted.test.proto_helpers import MemoryReactor
@@ -69,10 +69,16 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
self.request_url = None
- async def get_json(url: str, args: Mapping[Any, Any]) -> List[JsonDict]:
- if not args.get(b"access_token"):
+ async def get_json(
+ url: str,
+ args: Mapping[Any, Any],
+ headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+ ) -> List[JsonDict]:
+ # Ensure the access token is passed as both a header and query arg.
+ if not headers.get("Authorization") or not args.get(b"access_token"):
raise RuntimeError("Access token not provided")
+ self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
self.assertEqual(args.get(b"access_token"), TOKEN)
self.request_url = url
if url == URL_USER:
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index 3018d3fc6f..d4dccfc2f0 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -43,7 +43,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store = Mock()
self.store.get_aliases_for_room = simple_async_mock([])
- self.store.get_users_in_room = simple_async_mock([])
+ self.store.get_local_users_in_room = simple_async_mock([])
@defer.inlineCallbacks
def test_regex_user_id_prefix_match(self):
@@ -129,7 +129,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_aliases_for_room = simple_async_mock(
["#irc_foobar:matrix.org", "#athing:matrix.org"]
)
- self.store.get_users_in_room = simple_async_mock([])
+ self.store.get_local_users_in_room = simple_async_mock([])
self.assertTrue(
(
yield defer.ensureDeferred(
@@ -184,7 +184,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_aliases_for_room = simple_async_mock(
["#xmpp_foobar:matrix.org", "#athing:matrix.org"]
)
- self.store.get_users_in_room = simple_async_mock([])
+ self.store.get_local_users_in_room = simple_async_mock([])
self.assertFalse(
(
yield defer.ensureDeferred(
@@ -203,7 +203,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@irc_foobar:matrix.org"
self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"])
- self.store.get_users_in_room = simple_async_mock([])
+ self.store.get_local_users_in_room = simple_async_mock([])
self.assertTrue(
(
yield defer.ensureDeferred(
@@ -236,7 +236,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
def test_member_list_match(self):
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
# Note that @irc_fo:here is the AS user.
- self.store.get_users_in_room = simple_async_mock(
+ self.store.get_local_users_in_room = simple_async_mock(
["@alice:here", "@irc_fo:here", "@bob:here"]
)
self.store.get_aliases_for_room = simple_async_mock([])
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 820a1a54e2..63628aa6b0 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -469,6 +469,18 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
self.assertEqual(keys, {})
+ def test_keyid_containing_forward_slash(self) -> None:
+ """We should url-encode any url unsafe chars in key ids.
+
+ Detects https://github.com/matrix-org/synapse/issues/14488.
+ """
+ fetcher = ServerKeyFetcher(self.hs)
+ self.get_success(fetcher.get_keys("example.com", ["key/potato"], 0))
+
+ self.http_client.get_json.assert_called_once()
+ args, kwargs = self.http_client.get_json.call_args
+ self.assertEqual(kwargs["path"], "/_matrix/key/v2/server/key%2Fpotato")
+
class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index ffc3012a86..685a9a6d52 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -141,10 +141,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
hs = self.setup_test_homeserver(
federation_transport_client=fed_transport_client,
)
- # Load the modules into the homeserver
- module_api = hs.get_module_api()
- for module, config in hs.config.modules.loaded_modules:
- module(config=config, api=module_api)
load_legacy_presence_router(hs)
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index c6dd99316a..9f1115dd23 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from unittest.mock import Mock
from synapse.api.errors import Codes, SynapseError
@@ -51,7 +50,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
channel = self.make_signed_federation_request(
"GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,)
)
- self.assertEqual(HTTPStatus.OK, channel.code)
+ self.assertEqual(200, channel.code)
complexity = channel.json_body["v1"]
self.assertTrue(complexity > 0, complexity)
@@ -63,7 +62,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
channel = self.make_signed_federation_request(
"GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,)
)
- self.assertEqual(HTTPStatus.OK, channel.code)
+ self.assertEqual(200, channel.code)
complexity = channel.json_body["v1"]
self.assertEqual(complexity, 1.23)
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index 50e376f695..e67f405826 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -12,25 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import json
from unittest import mock
import twisted.web.client
from twisted.internet import defer
-from twisted.internet.protocol import Protocol
-from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase
+from synapse.rest import admin
+from synapse.rest.client import login, room
from synapse.server import HomeServer
-from synapse.types import JsonDict
from synapse.util import Clock
+from tests.test_utils import FakeResponse, event_injection
from tests.unittest import FederatingHomeserverTestCase
class FederationClientTest(FederatingHomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
super().prepare(reactor, clock, homeserver)
@@ -89,8 +94,8 @@ class FederationClientTest(FederatingHomeserverTestCase):
# mock up the response, and have the agent return it
self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
- _mock_response(
- {
+ FakeResponse.json(
+ payload={
"pdus": [
create_event_dict,
member_event_dict,
@@ -137,14 +142,14 @@ class FederationClientTest(FederatingHomeserverTestCase):
def test_get_pdu_returns_nothing_when_event_does_not_exist(self):
"""No event should be returned when the event does not exist"""
- remote_pdu = self.get_success(
+ pulled_pdu_info = self.get_success(
self.hs.get_federation_client().get_pdu(
["yet.another.server"],
"event_should_not_exist",
RoomVersions.V9,
)
)
- self.assertEqual(remote_pdu, None)
+ self.assertEqual(pulled_pdu_info, None)
def test_get_pdu(self):
"""Test to make sure an event is returned by `get_pdu()`"""
@@ -164,13 +169,15 @@ class FederationClientTest(FederatingHomeserverTestCase):
remote_pdu.internal_metadata.outlier = True
# Get the event again. This time it should read it from cache.
- remote_pdu2 = self.get_success(
+ pulled_pdu_info2 = self.get_success(
self.hs.get_federation_client().get_pdu(
["yet.another.server"],
remote_pdu.event_id,
RoomVersions.V9,
)
)
+ self.assertIsNotNone(pulled_pdu_info2)
+ remote_pdu2 = pulled_pdu_info2.pdu
# Sanity check that we are working against the same event
self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id)
@@ -199,8 +206,8 @@ class FederationClientTest(FederatingHomeserverTestCase):
# mock up the response, and have the agent return it
self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
- _mock_response(
- {
+ FakeResponse.json(
+ payload={
"origin": "yet.another.server",
"origin_server_ts": 900,
"pdus": [
@@ -210,13 +217,15 @@ class FederationClientTest(FederatingHomeserverTestCase):
)
)
- remote_pdu = self.get_success(
+ pulled_pdu_info = self.get_success(
self.hs.get_federation_client().get_pdu(
["yet.another.server"],
"event_id",
RoomVersions.V9,
)
)
+ self.assertIsNotNone(pulled_pdu_info)
+ remote_pdu = pulled_pdu_info.pdu
# check the right call got made to the agent
self._mock_agent.request.assert_called_once_with(
@@ -231,20 +240,68 @@ class FederationClientTest(FederatingHomeserverTestCase):
return remote_pdu
+ def test_backfill_invalid_signature_records_failed_pull_attempts(
+ self,
+ ) -> None:
+ """
+ Test to make sure that events from /backfill with invalid signatures get
+ recorded as failed pull attempts.
+ """
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ # We purposely don't run `add_hashes_and_signatures_from_other_server`
+ # over this because we want the signature check to fail.
+ pulled_event, _ = self.get_success(
+ event_injection.create_event(
+ self.hs,
+ room_id=room_id,
+ sender=OTHER_USER,
+ type="test_event_type",
+ content={"body": "garply"},
+ )
+ )
-def _mock_response(resp: JsonDict):
- body = json.dumps(resp).encode("utf-8")
+ # We expect an outbound request to /backfill, so stub that out
+ self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed(
+ FakeResponse.json(
+ payload={
+ "origin": "yet.another.server",
+ "origin_server_ts": 900,
+ # Mimic the other server returning our new `pulled_event`
+ "pdus": [pulled_event.get_pdu_json()],
+ }
+ )
+ )
- def deliver_body(p: Protocol):
- p.dataReceived(body)
- p.connectionLost(Failure(twisted.web.client.ResponseDone()))
+ self.get_success(
+ self.hs.get_federation_client().backfill(
+ # We use "yet.another.server" instead of
+ # `self.OTHER_SERVER_NAME` because we want to see the behavior
+ # from `_check_sigs_and_hash_and_fetch_one` where it tries to
+ # fetch the PDU again from the origin server if the signature
+ # fails. Just want to make sure that the failure is counted from
+ # both code paths.
+ dest="yet.another.server",
+ room_id=room_id,
+ limit=1,
+ extremities=[pulled_event.event_id],
+ ),
+ )
- response = mock.Mock(
- code=200,
- phrase=b"OK",
- headers=twisted.web.client.Headers({"content-Type": ["application/json"]}),
- length=len(body),
- deliverBody=deliver_body,
- )
- mock.seal(response)
- return response
+ # Make sure our failed pull attempt was recorded
+ backfill_num_attempts = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ )
+ )
+        # This is 2 because the signature check failed once for `self.OTHER_SERVER_NAME`
+        # and once for "yet.another.server".
+ self.assertEqual(backfill_num_attempts, 2)
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 01a1db6115..f1e357764f 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -49,7 +49,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["event_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
@@ -89,7 +94,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["event_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
@@ -121,7 +131,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
# send the second RR
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["other_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
@@ -173,17 +188,24 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
return c
def prepare(self, reactor, clock, hs):
- # stub out `get_rooms_for_user` and `get_users_in_room` so that the
+ test_room_id = "!room:host1"
+
+ # stub out `get_rooms_for_user` and `get_current_hosts_in_room` so that the
# server thinks the user shares a room with `@user2:host2`
def get_rooms_for_user(user_id):
- return defer.succeed({"!room:host1"})
+ return defer.succeed({test_room_id})
hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user
- def get_users_in_room(room_id):
- return defer.succeed({"@user2:host2"})
+ async def get_current_hosts_in_room(room_id):
+ if room_id == test_room_id:
+ return ["host2"]
+
+            # TODO: We should fail the test when we encounter an unexpected room ID.
+ # We can't just use `self.fail(...)` here because the app code is greedy
+ # with `Exception` and will catch it before the test can see it.
- hs.get_datastores().main.get_users_in_room = get_users_in_room
+ hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room
# whenever send_transaction is called, record the edu data
self.edus = []
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 3a6ef221ae..177e5b5afc 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -212,7 +212,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(r[("m.room.member", joining_user)].membership, "join")
@override_config({"experimental_features": {"msc3706_enabled": True}})
- def test_send_join_partial_state(self):
+ def test_send_join_partial_state(self) -> None:
"""When MSC3706 support is enabled, /send_join should return partial state"""
joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME
join_result = self._make_join(joining_user)
@@ -240,6 +240,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
("m.room.power_levels", ""),
("m.room.join_rules", ""),
("m.room.history_visibility", ""),
+ # Users included here because they're heroes.
+ ("m.room.member", "@kermit:test"),
+ ("m.room.member", "@fozzie:test"),
],
)
@@ -249,9 +252,9 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
]
self.assertCountEqual(
returned_auth_chain_events,
- [
- ("m.room.member", "@kermit:test"),
- ],
+ # TODO: change the test so that we get at least one event in the auth chain
+ # here.
+ [],
)
# the room should show that the new user is a member
diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py
index d33e86db4c..e88e5d8bb3 100644
--- a/tests/federation/transport/server/test__base.py
+++ b/tests/federation/transport/server/test__base.py
@@ -18,9 +18,10 @@ from typing import Dict, List, Tuple
from synapse.api.errors import Codes
from synapse.federation.transport.server import BaseFederationServlet
from synapse.federation.transport.server._base import Authenticator, _parse_auth_header
-from synapse.http.server import JsonResource, cancellable
+from synapse.http.server import JsonResource
from synapse.server import HomeServer
from synapse.types import JsonDict
+from synapse.util.cancellation import cancellable
from synapse.util.ratelimitutils import FederationRateLimiter
from tests import unittest
diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py
index c2320ce133..b84c74fc0e 100644
--- a/tests/federation/transport/test_client.py
+++ b/tests/federation/transport/test_client.py
@@ -13,9 +13,13 @@
# limitations under the License.
import json
+from unittest.mock import Mock
+
+import ijson.common
from synapse.api.room_versions import RoomVersions
from synapse.federation.transport.client import SendJoinParser
+from synapse.util import ExceptionBundle
from tests.unittest import TestCase
@@ -94,3 +98,46 @@ class SendJoinParserTestCase(TestCase):
# Retrieve and check the parsed SendJoinResponse
parsed_response = parser.finish()
self.assertEqual(parsed_response.servers_in_room, ["hs1", "hs2"])
+
+ def test_errors_closing_coroutines(self) -> None:
+ """Check we close all coroutines, even if closing the first raises an Exception.
+
+        We also check that an exception is raised: the parser wraps the close failures
+        in an `ExceptionBundle`, though we don't inspect its contents.
+ """
+ parser = SendJoinParser(RoomVersions.V1, False)
+ response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}
+ serialisation = json.dumps(response).encode()
+
+ # Mock the coroutines managed by this parser.
+ # The first one will error when we try to close it.
+ coro_1 = Mock()
+ coro_1.close = Mock(side_effect=RuntimeError("Couldn't close coro 1"))
+
+ coro_2 = Mock()
+
+ coro_3 = Mock()
+ coro_3.close = Mock(side_effect=RuntimeError("Couldn't close coro 3"))
+
+ original_coros = parser._coros
+ parser._coros = [coro_1, coro_2, coro_3]
+
+ # Close the original coroutines. If we don't, when we garbage collect them
+ # they will throw, failing the test. (Oddly, this only started in CPython 3.11).
+ for coro in original_coros:
+ try:
+ coro.close()
+ except ijson.common.IncompleteJSONError:
+ pass
+
+ # Send half of the data to the parser
+ parser.write(serialisation[: len(serialisation) // 2])
+
+ # Close the parser. There should be _some_ kind of exception.
+ with self.assertRaises(ExceptionBundle):
+ parser.finish()
+
+ # In any case, we should have tried to close both coros.
+ coro_1.close.assert_called()
+ coro_2.close.assert_called()
+ coro_3.close.assert_called()
diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py
index 0d048207b7..d21c11b716 100644
--- a/tests/federation/transport/test_knocking.py
+++ b/tests/federation/transport/test_knocking.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
-from http import HTTPStatus
from typing import Dict, List
from synapse.api.constants import EventTypes, JoinRules, Membership
@@ -256,7 +255,7 @@ class FederationKnockingTestCase(
RoomVersions.V7.identifier,
),
)
- self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ self.assertEqual(200, channel.code, channel.result)
# Note: We don't expect the knock membership event to be sent over federation as
# part of the stripped room state, as the knocking homeserver already has that
@@ -294,7 +293,7 @@ class FederationKnockingTestCase(
% (room_id, signed_knock_event.event_id),
signed_knock_event_json,
)
- self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ self.assertEqual(200, channel.code, channel.result)
# Check that we got the stripped room state in return
room_state_events = channel.json_body["knock_state_events"]
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index b17af2725b..144e49d0fd 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -22,7 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
import synapse.storage
-from synapse.api.constants import EduTypes
+from synapse.api.constants import EduTypes, EventTypes
from synapse.appservice import (
ApplicationService,
TransactionOneTimeKeyCounts,
@@ -36,7 +36,7 @@ from synapse.util import Clock
from synapse.util.stringutils import random_string
from tests import unittest
-from tests.test_utils import make_awaitable, simple_async_mock
+from tests.test_utils import event_injection, make_awaitable, simple_async_mock
from tests.unittest import override_config
from tests.utils import MockClock
@@ -76,9 +76,13 @@ class AppServiceHandlerTestCase(unittest.TestCase):
event = Mock(
sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar"
)
- self.mock_store.get_all_new_events_stream.side_effect = [
- make_awaitable((0, [], {})),
- make_awaitable((1, [event], {event.event_id: 0})),
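+        # `get_all_new_event_ids_stream` now returns only event IDs; the events
+        # themselves are loaded separately via `get_events_as_list`, so both are stubbed.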
+ self.mock_store.get_all_new_event_ids_stream.side_effect = [
+ make_awaitable((0, {})),
+ make_awaitable((1, {event.event_id: 0})),
+ ]
+ self.mock_store.get_events_as_list.side_effect = [
+ make_awaitable([]),
+ make_awaitable([event]),
]
self.handler.notify_interested_services(RoomStreamToken(None, 1))
@@ -95,10 +99,10 @@ class AppServiceHandlerTestCase(unittest.TestCase):
event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar")
self.mock_as_api.query_user.return_value = make_awaitable(True)
- self.mock_store.get_all_new_events_stream.side_effect = [
- make_awaitable((0, [event], {event.event_id: 0})),
+ self.mock_store.get_all_new_event_ids_stream.side_effect = [
+ make_awaitable((0, {event.event_id: 0})),
]
-
+ self.mock_store.get_events_as_list.side_effect = [make_awaitable([event])]
self.handler.notify_interested_services(RoomStreamToken(None, 0))
self.mock_as_api.query_user.assert_called_once_with(services[0], user_id)
@@ -112,7 +116,7 @@ class AppServiceHandlerTestCase(unittest.TestCase):
event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar")
self.mock_as_api.query_user.return_value = make_awaitable(True)
- self.mock_store.get_all_new_events_stream.side_effect = [
+ self.mock_store.get_all_new_event_ids_stream.side_effect = [
make_awaitable((0, [event], {event.event_id: 0})),
]
@@ -386,15 +390,16 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
receipts.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ self.hs = hs
# Mock the ApplicationServiceScheduler's _TransactionController's send method so that
# we can track any outgoing ephemeral events
self.send_mock = simple_async_mock()
- hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock
+ hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment]
# Mock out application services, and allow defining our own in tests
self._services: List[ApplicationService] = []
- self.hs.get_datastores().main.get_app_services = Mock(
+ self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[assignment]
return_value=self._services
)
@@ -412,6 +417,157 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
"exclusive_as_user", "password", self.exclusive_as_user_device_id
)
+ def _notify_interested_services(self):
+        # `current_max` is normally advanced in `notify_interested_services`, but we call
+        # the internal async version directly so the reactor gets pushed to completion.
+ self.hs.get_application_service_handler().current_max += 1
+ self.get_success(
+ self.hs.get_application_service_handler()._notify_interested_services(
+ RoomStreamToken(
+ None, self.hs.get_application_service_handler().current_max
+ )
+ )
+ )
+
+ @parameterized.expand(
+ [
+ ("@local_as_user:test", True),
+ # Defining remote users in an application service user namespace regex is a
+ # footgun since the appservice might assume that it'll receive all events
+ # sent by that remote user, but it will only receive events in rooms that
+ # are shared with a local user. So we just remove this footgun possibility
+ # entirely and we won't notify the application service based on remote
+ # users.
+ ("@remote_as_user:remote", False),
+ ]
+ )
+ def test_match_interesting_room_members(
+ self, interesting_user: str, should_notify: bool
+ ):
+ """
+        Test to make sure that an application service interested in a user (local or
+        remote) in the room is notified as expected when someone else in the room sends
+        a message.
+ """
+ # Register an application service that's interested in the `interesting_user`
+ interested_appservice = self._register_application_service(
+ namespaces={
+ ApplicationService.NS_USERS: [
+ {
+ "regex": interesting_user,
+ "exclusive": False,
+ },
+ ],
+ },
+ )
+
+ # Create a room
+ alice = self.register_user("alice", "pass")
+ alice_access_token = self.login("alice", "pass")
+ room_id = self.helper.create_room_as(room_creator=alice, tok=alice_access_token)
+
+ # Join the interesting user to the room
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, room_id, interesting_user, "join"
+ )
+ )
+ # Kick the appservice into checking this membership event to get the event out
+ # of the way
+ self._notify_interested_services()
+        # We don't care about the interesting user's join event here; reset the mock so
+        # the assertions below only see the message sent next.
+ self.send_mock.reset_mock()
+
+ # Send a message from an uninteresting user
+ self.helper.send_event(
+ room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message from uninteresting user",
+ },
+ tok=alice_access_token,
+ )
+ # Kick the appservice into checking this new event
+ self._notify_interested_services()
+
+ if should_notify:
+ self.send_mock.assert_called_once()
+ (
+ service,
+ events,
+ _ephemeral,
+ _to_device_messages,
+ _otks,
+ _fbks,
+ _device_list_summary,
+ ) = self.send_mock.call_args[0]
+
+ # Even though the message came from an uninteresting user, it should still
+ # notify us because the interesting user is joined to the room where the
+ # message was sent.
+ self.assertEqual(service, interested_appservice)
+ self.assertEqual(events[0]["type"], "m.room.message")
+ self.assertEqual(events[0]["sender"], alice)
+ else:
+ self.send_mock.assert_not_called()
+
+ def test_application_services_receive_events_sent_by_interesting_local_user(self):
+ """
+        Test to make sure that a message sent from a local user can be interesting and
+ picked up by the appservice.
+ """
+ # Register an application service that's interested in all local users
+ interested_appservice = self._register_application_service(
+ namespaces={
+ ApplicationService.NS_USERS: [
+ {
+ "regex": ".*",
+ "exclusive": False,
+ },
+ ],
+ },
+ )
+
+ # Create a room
+ alice = self.register_user("alice", "pass")
+ alice_access_token = self.login("alice", "pass")
+ room_id = self.helper.create_room_as(room_creator=alice, tok=alice_access_token)
+
+        # We don't care about any interesting events before this point; reset the mock so
+        # the assertions below only see the message sent next.
+ self.send_mock.reset_mock()
+
+ # Send a message from the interesting local user
+ self.helper.send_event(
+ room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message from interesting local user",
+ },
+ tok=alice_access_token,
+ )
+ # Kick the appservice into checking this new event
+ self._notify_interested_services()
+
+ self.send_mock.assert_called_once()
+ (
+ service,
+ events,
+ _ephemeral,
+ _to_device_messages,
+ _otks,
+ _fbks,
+ _device_list_summary,
+ ) = self.send_mock.call_args[0]
+
+ # Events sent from an interesting local user should also be picked up as
+ # interesting to the appservice.
+ self.assertEqual(service, interested_appservice)
+ self.assertEqual(events[0]["type"], "m.room.message")
+ self.assertEqual(events[0]["sender"], alice)
+
def test_sending_read_receipt_batches_to_application_services(self):
"""Tests that a large batch of read receipts are sent correctly to
interested application services.
@@ -447,6 +603,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
receipt_type="m.read",
user_id=self.local_user,
event_ids=[f"$eventid_{i}"],
+ thread_id=None,
data={},
)
)
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 7106799d44..036dbbc45b 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Optional
from unittest.mock import Mock
import pymacaroons
@@ -19,6 +20,7 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import AuthError, ResourceLimitError
from synapse.rest import admin
+from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.util import Clock
@@ -29,6 +31,7 @@ from tests.test_utils import make_awaitable
class AuthTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets,
+ login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -46,6 +49,23 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.user1 = self.register_user("a_user", "pass")
+ def token_login(self, token: str) -> Optional[str]:
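+        """Log in with `m.login.token`; return the user ID, or None on failure."""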
+ body = {
+ "type": "m.login.token",
+ "token": token,
+ }
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/login",
+ body,
+ )
+
+ if channel.code == 200:
+ return channel.json_body["user_id"]
+
+ return None
+
def test_macaroon_caveats(self) -> None:
token = self.macaroon_generator.generate_guest_access_token("a_user")
macaroon = pymacaroons.Macaroon.deserialize(token)
@@ -73,49 +93,62 @@ class AuthTestCase(unittest.HomeserverTestCase):
v.satisfy_general(verify_guest)
v.verify(macaroon, self.hs.config.key.macaroon_secret_key)
- def test_short_term_login_token_gives_user_id(self) -> None:
- token = self.macaroon_generator.generate_short_term_login_token(
- self.user1, "", duration_in_ms=5000
+ def test_login_token_gives_user_id(self) -> None:
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(
+ self.user1,
+ duration_ms=(5 * 1000),
+ )
)
- res = self.get_success(self.auth_handler.validate_short_term_login_token(token))
+
+ res = self.get_success(self.auth_handler.consume_login_token(token))
self.assertEqual(self.user1, res.user_id)
- self.assertEqual("", res.auth_provider_id)
+ self.assertEqual(None, res.auth_provider_id)
- # when we advance the clock, the token should be rejected
- self.reactor.advance(6)
- self.get_failure(
- self.auth_handler.validate_short_term_login_token(token),
- AuthError,
+ def test_login_token_reuse_fails(self) -> None:
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(
+ self.user1,
+ duration_ms=(5 * 1000),
+ )
)
- def test_short_term_login_token_gives_auth_provider(self) -> None:
- token = self.macaroon_generator.generate_short_term_login_token(
- self.user1, auth_provider_id="my_idp"
- )
- res = self.get_success(self.auth_handler.validate_short_term_login_token(token))
- self.assertEqual(self.user1, res.user_id)
- self.assertEqual("my_idp", res.auth_provider_id)
+ self.get_success(self.auth_handler.consume_login_token(token))
- def test_short_term_login_token_cannot_replace_user_id(self) -> None:
- token = self.macaroon_generator.generate_short_term_login_token(
- self.user1, "", duration_in_ms=5000
+ self.get_failure(
+ self.auth_handler.consume_login_token(token),
+ AuthError,
)
- macaroon = pymacaroons.Macaroon.deserialize(token)
- res = self.get_success(
- self.auth_handler.validate_short_term_login_token(macaroon.serialize())
+ def test_login_token_expires(self) -> None:
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(
+ self.user1,
+ duration_ms=(5 * 1000),
+ )
)
- self.assertEqual(self.user1, res.user_id)
-
- # add another "user_id" caveat, which might allow us to override the
- # user_id.
- macaroon.add_first_party_caveat("user_id = b_user")
+ # when we advance the clock, the token should be rejected
+ self.reactor.advance(6)
self.get_failure(
- self.auth_handler.validate_short_term_login_token(macaroon.serialize()),
+ self.auth_handler.consume_login_token(token),
AuthError,
)
+ def test_login_token_gives_auth_provider(self) -> None:
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(
+ self.user1,
+ auth_provider_id="my_idp",
+ auth_provider_session_id="11-22-33-44",
+ duration_ms=(5 * 1000),
+ )
+ )
+ res = self.get_success(self.auth_handler.consume_login_token(token))
+ self.assertEqual(self.user1, res.user_id)
+ self.assertEqual("my_idp", res.auth_provider_id)
+ self.assertEqual("11-22-33-44", res.auth_provider_session_id)
+
def test_mau_limits_disabled(self) -> None:
self.auth_blocking._limit_usage_by_mau = False
# Ensure does not throw exception
@@ -125,12 +158,12 @@ class AuthTestCase(unittest.HomeserverTestCase):
)
)
- self.get_success(
- self.auth_handler.validate_short_term_login_token(
- self._get_macaroon().serialize()
- )
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(self.user1)
)
+ self.assertIsNotNone(self.token_login(token))
+
def test_mau_limits_exceeded_large(self) -> None:
self.auth_blocking._limit_usage_by_mau = True
self.hs.get_datastores().main.get_monthly_active_count = Mock(
@@ -147,12 +180,10 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.hs.get_datastores().main.get_monthly_active_count = Mock(
return_value=make_awaitable(self.large_number_of_users)
)
- self.get_failure(
- self.auth_handler.validate_short_term_login_token(
- self._get_macaroon().serialize()
- ),
- ResourceLimitError,
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(self.user1)
)
+ self.assertIsNone(self.token_login(token))
def test_mau_limits_parity(self) -> None:
# Ensure we're not at the unix epoch.
@@ -171,12 +202,10 @@ class AuthTestCase(unittest.HomeserverTestCase):
),
ResourceLimitError,
)
- self.get_failure(
- self.auth_handler.validate_short_term_login_token(
- self._get_macaroon().serialize()
- ),
- ResourceLimitError,
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(self.user1)
)
+ self.assertIsNone(self.token_login(token))
# If in monthly active cohort
self.hs.get_datastores().main.user_last_seen_monthly_active = Mock(
@@ -187,11 +216,10 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.user1, device_id=None, valid_until_ms=None
)
)
- self.get_success(
- self.auth_handler.validate_short_term_login_token(
- self._get_macaroon().serialize()
- )
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(self.user1)
)
+ self.assertIsNotNone(self.token_login(token))
def test_mau_limits_not_exceeded(self) -> None:
self.auth_blocking._limit_usage_by_mau = True
@@ -209,14 +237,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
self.hs.get_datastores().main.get_monthly_active_count = Mock(
return_value=make_awaitable(self.small_number_of_users)
)
- self.get_success(
- self.auth_handler.validate_short_term_login_token(
- self._get_macaroon().serialize()
- )
- )
-
- def _get_macaroon(self) -> pymacaroons.Macaroon:
- token = self.macaroon_generator.generate_short_term_login_token(
- self.user1, "", duration_in_ms=5000
+ token = self.get_success(
+ self.auth_handler.create_login_token_for_user_id(self.user1)
)
- return pymacaroons.Macaroon.deserialize(token)
+ self.assertIsNotNone(self.token_login(token))
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index 7586e472b5..bce65fab7d 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
-from typing import Any, Dict
from twisted.test.proto_helpers import MemoryReactor
@@ -21,6 +19,7 @@ from synapse.push.rulekinds import PRIORITY_CLASS_MAP
from synapse.rest import admin
from synapse.rest.client import account, login
from synapse.server import HomeServer
+from synapse.synapse_rust.push import PushRule
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -58,7 +57,7 @@ class DeactivateAccountTestCase(HomeserverTestCase):
access_token=self.token,
)
- self.assertEqual(req.code, HTTPStatus.OK, req)
+ self.assertEqual(req.code, 200, req)
def test_global_account_data_deleted_upon_deactivation(self) -> None:
"""
@@ -131,12 +130,12 @@ class DeactivateAccountTestCase(HomeserverTestCase):
),
)
- def _is_custom_rule(self, push_rule: Dict[str, Any]) -> bool:
+ def _is_custom_rule(self, push_rule: PushRule) -> bool:
"""
Default rules start with a dot: such as .m.rule and .im.vector.
This function returns true iff a rule is custom (not default).
"""
- return "/." not in push_rule["rule_id"]
+ return "/." not in push_rule.rule_id
def test_push_rules_deleted_upon_account_deactivation(self) -> None:
"""
@@ -158,32 +157,30 @@ class DeactivateAccountTestCase(HomeserverTestCase):
)
# Test the rule exists
- push_rules = self.get_success(self._store.get_push_rules_for_user(self.user))
+ filtered_push_rules = self.get_success(
+ self._store.get_push_rules_for_user(self.user)
+ )
# Filter out default rules; we don't care
- push_rules = list(filter(self._is_custom_rule, push_rules))
+ push_rules = [
+ r for r, _ in filtered_push_rules.rules() if self._is_custom_rule(r)
+ ]
# Check our rule made it
- self.assertEqual(
- push_rules,
- [
- {
- "user_name": "@user:test",
- "rule_id": "personal.override.rule1",
- "priority_class": 5,
- "priority": 0,
- "conditions": [],
- "actions": [],
- "default": False,
- }
- ],
- push_rules,
- )
+ self.assertEqual(len(push_rules), 1)
+ self.assertEqual(push_rules[0].rule_id, "personal.override.rule1")
+ self.assertEqual(push_rules[0].priority_class, 5)
+ self.assertEqual(push_rules[0].conditions, [])
+ self.assertEqual(push_rules[0].actions, [])
# Request the deactivation of our account
self._deactivate_my_account()
- push_rules = self.get_success(self._store.get_push_rules_for_user(self.user))
+ filtered_push_rules = self.get_success(
+ self._store.get_push_rules_for_user(self.user)
+ )
# Filter out default rules; we don't care
- push_rules = list(filter(self._is_custom_rule, push_rules))
+ push_rules = [
+ r for r, _ in filtered_push_rules.rules() if self._is_custom_rule(r)
+ ]
# Check our rule no longer exists
self.assertEqual(push_rules, [], push_rules)
@@ -322,3 +319,18 @@ class DeactivateAccountTestCase(HomeserverTestCase):
)
),
)
+
+ def test_deactivate_account_needs_auth(self) -> None:
+ """
+ Tests that making a request to /deactivate with an empty body
+ succeeds in starting the user-interactive auth flow.
+ """
+ req = self.make_request(
+ "POST",
+ "account/deactivate",
+ {},
+ access_token=self.token,
+ )
+
+ self.assertEqual(req.code, 401, req)
+ self.assertEqual(req.json_body["flows"], [{"stages": ["m.login.password"]}])
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index b8b465d35b..ce7525e29c 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -19,7 +19,7 @@ from typing import Optional
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import NotFoundError, SynapseError
-from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN
+from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler
from synapse.server import HomeServer
from synapse.util import Clock
@@ -32,7 +32,9 @@ user2 = "@theresa:bbb"
class DeviceTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver("server", federation_http_client=None)
- self.handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.handler = handler
self.store = hs.get_datastores().main
return hs
@@ -61,6 +63,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
self.assertEqual(res, "fco")
dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
+ assert dev is not None
self.assertEqual(dev["display_name"], "display name")
def test_device_is_preserved_if_exists(self) -> None:
@@ -83,6 +86,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
self.assertEqual(res2, "fco")
dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
+ assert dev is not None
self.assertEqual(dev["display_name"], "display name")
def test_device_id_is_made_up_if_unspecified(self) -> None:
@@ -95,6 +99,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
)
dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id))
+ assert dev is not None
self.assertEqual(dev["display_name"], "display")
def test_get_devices_by_user(self) -> None:
@@ -264,7 +269,9 @@ class DeviceTestCase(unittest.HomeserverTestCase):
class DehydrationTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver("server", federation_http_client=None)
- self.handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.handler = handler
self.registration = hs.get_registration_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
@@ -284,9 +291,9 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
)
)
- retrieved_device_id, device_data = self.get_success(
- self.handler.get_dehydrated_device(user_id=user_id)
- )
+ result = self.get_success(self.handler.get_dehydrated_device(user_id=user_id))
+ assert result is not None
+ retrieved_device_id, device_data = result
self.assertEqual(retrieved_device_id, stored_dehydrated_device_id)
self.assertEqual(device_data, {"device_data": {"foo": "bar"}})
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 1e6ad4b663..95698bc275 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -891,6 +891,12 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
new_callable=mock.MagicMock,
return_value=make_awaitable(["some_room_id"]),
)
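+        # Also stub `get_users_server_still_shares_room_with` so the handler believes
+        # we still share a room with the remote user being queried.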
+ mock_get_users = mock.patch.object(
+ self.store,
+ "get_users_server_still_shares_room_with",
+ new_callable=mock.MagicMock,
+ return_value=make_awaitable({remote_user_id}),
+ )
mock_request = mock.patch.object(
self.hs.get_federation_client(),
"query_user_devices",
@@ -898,7 +904,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
return_value=make_awaitable(response_body),
)
- with mock_get_rooms, mock_request as mocked_federation_request:
+ with mock_get_rooms, mock_get_users, mock_request as mocked_federation_request:
# Make the first query and sanity check it succeeds.
response_1 = self.get_success(
e2e_handler.query_devices(
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 745750b1d7..d00c69c229 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -19,7 +19,13 @@ from unittest.mock import Mock, patch
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ LimitExceededError,
+ NotFoundError,
+ SynapseError,
+)
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.federation_base import event_from_pdu_json
@@ -28,6 +34,7 @@ from synapse.logging.context import LoggingContext, run_in_background
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
+from synapse.storage.databases.main.events_worker import EventCacheEntry
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -322,6 +329,102 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
)
self.get_success(d)
+ def test_backfill_ignores_known_events(self) -> None:
+ """
+ Tests that events that we already know about are ignored when backfilling.
+ """
+ # Set up users
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+
+ other_server = "otherserver"
+ other_user = "@otheruser:" + other_server
+
+ # Create a room to backfill events into
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(self.store.get_room_version(room_id))
+
+ # Build an event to backfill
+ event = event_from_pdu_json(
+ {
+ "type": EventTypes.Message,
+ "content": {"body": "hello world", "msgtype": "m.text"},
+ "room_id": room_id,
+ "sender": other_user,
+ "depth": 32,
+ "prev_events": [],
+ "auth_events": [],
+ "origin_server_ts": self.clock.time_msec(),
+ },
+ room_version,
+ )
+
+ # Ensure the event is not already in the DB
+ self.get_failure(
+ self.store.get_event(event.event_id),
+ NotFoundError,
+ )
+
+ # Backfill the event and check that it has entered the DB.
+
+ # We mock out the FederationClient.backfill method, to pretend that a remote
+ # server has returned our fake event.
+ federation_client_backfill_mock = Mock(return_value=make_awaitable([event]))
+ self.hs.get_federation_client().backfill = federation_client_backfill_mock
+
+ # We also mock the persist method with a side effect of itself. This allows us
+ # to track when it has been called while preserving its function.
+ persist_events_and_notify_mock = Mock(
+ side_effect=self.hs.get_federation_event_handler().persist_events_and_notify
+ )
+ self.hs.get_federation_event_handler().persist_events_and_notify = (
+ persist_events_and_notify_mock
+ )
+
+ # Small side-tangent. We populate the event cache with the event, even though
+ # it is not yet in the DB. This is an invalid scenario that can currently occur
+ # due to not properly invalidating the event cache.
+ # See https://github.com/matrix-org/synapse/issues/13476.
+ #
+ # As a result, backfill should not rely on the event cache to check whether
+ # we already have an event in the DB.
+ # TODO: Remove this bit when the event cache is properly invalidated.
+ cache_entry = EventCacheEntry(
+ event=event,
+ redacted_event=None,
+ )
+ self.store._get_event_cache.set_local((event.event_id,), cache_entry)
+
+ # We now call FederationEventHandler.backfill (a separate method) to trigger
+ # a backfill request. It should receive the fake event.
+ self.get_success(
+ self.hs.get_federation_event_handler().backfill(
+ other_user,
+ room_id,
+ limit=10,
+ extremities=[],
+ )
+ )
+
+ # Check that our fake event was persisted.
+ persist_events_and_notify_mock.assert_called_once()
+ persist_events_and_notify_mock.reset_mock()
+
+ # Now we repeat the backfill, having the homeserver receive the fake event
+ # again.
+ self.get_success(
+ self.hs.get_federation_event_handler().backfill(
+ other_user,
+ room_id,
+ limit=10,
+ extremities=[],
+ ),
+ )
+
+ # This time, we expect no event persistence to have occurred, as we already
+ # have this event.
+ persist_events_and_notify_mock.assert_not_called()
+
@unittest.override_config(
{"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}}
)
diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py
index 51c8dd6498..e448cb1901 100644
--- a/tests/handlers/test_federation_event.py
+++ b/tests/handlers/test_federation_event.py
@@ -11,14 +11,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Optional
from unittest import mock
+from synapse.api.errors import AuthError, StoreError
+from synapse.api.room_versions import RoomVersion
+from synapse.event_auth import (
+ check_state_dependent_auth_rules,
+ check_state_independent_auth_rules,
+)
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.transport.client import StateRequestResponse
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort
+from synapse.types import JsonDict
from tests import unittest
from tests.test_utils import event_injection, make_awaitable
@@ -34,7 +43,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
def make_homeserver(self, reactor, clock):
# mock out the federation transport client
self.mock_federation_transport_client = mock.Mock(
- spec=["get_room_state_ids", "get_room_state", "get_event"]
+ spec=["get_room_state_ids", "get_room_state", "get_event", "backfill"]
)
return super().setup_test_homeserver(
federation_transport_client=self.mock_federation_transport_client
@@ -227,3 +236,812 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
if prev_exists_as_outlier:
self.mock_federation_transport_client.get_event.assert_not_called()
+
+ def test_process_pulled_event_records_failed_backfill_attempts(
+ self,
+ ) -> None:
+ """
+ Test to make sure that failed backfill attempts for an event are
+ recorded in the `event_failed_pull_attempts` table.
+
+ In this test, we pretend we are processing a "pulled" event via
+ backfill. The pulled event has a fake `prev_event` which our server has
+ obviously never seen before, so it attempts to request the state at that
+ `prev_event`, which predictably fails because it's a fake event. Because
+ the server can't fetch the state at the missing `prev_event`, the
+ "pulled" event fails the history check and fails to process.
+
+ We check that we correctly record the number of failed pull attempts
+ for the pulled event and, as a sanity check, that the "pulled" event isn't
+ persisted.
+ """
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(main_store.get_room_version(room_id))
+
+ # We expect an outbound request to /state_ids, so stub that out
+ self.mock_federation_transport_client.get_room_state_ids.return_value = make_awaitable(
+ {
+ # Mimic the other server not knowing about the state at all.
+ # We want to cause Synapse to throw an error (`Unable to get
+ # missing prev_event $fake_prev_event`) and fail to backfill
+ # the pulled event.
+ "pdu_ids": [],
+ "auth_chain_ids": [],
+ }
+ )
+ # We also expect an outbound request to /state
+ self.mock_federation_transport_client.get_room_state.return_value = make_awaitable(
+ StateRequestResponse(
+ # Mimic the other server not knowing about the state at all.
+ # We want to cause Synapse to throw an error (`Unable to get
+ # missing prev_event $fake_prev_event`) and fail to backfill
+ # the pulled event.
+ auth_events=[],
+ state=[],
+ )
+ )
+
+ pulled_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [
+ # The fake prev event will make the pulled event fail
+ # the history check (`Unable to get missing prev_event
+ # $fake_prev_event`)
+ "$fake_prev_event"
+ ],
+ "auth_events": [],
+ "origin_server_ts": 1,
+ "depth": 12,
+ "content": {"body": "pulled"},
+ }
+ ),
+ room_version,
+ )
+
+ # The function under test: try to process the pulled event
+ with LoggingContext("test"):
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME, pulled_event, backfilled=True
+ )
+ )
+
+ # Make sure our failed pull attempt was recorded
+ backfill_num_attempts = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ )
+ )
+ self.assertEqual(backfill_num_attempts, 1)
+
+ # The function under test: try to process the pulled event again
+ with LoggingContext("test"):
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME, pulled_event, backfilled=True
+ )
+ )
+
+ # Make sure our second failed pull attempt was recorded (`num_attempts` was incremented)
+ backfill_num_attempts = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ )
+ )
+ self.assertEqual(backfill_num_attempts, 2)
+
+ # And as a sanity check, make sure the event was not persisted through all of this.
+ persisted = self.get_success(
+ main_store.get_event(pulled_event.event_id, allow_none=True)
+ )
+ self.assertIsNone(
+ persisted,
+ "pulled event that fails the history check should not be persisted at all",
+ )
+
+ def test_process_pulled_event_clears_backfill_attempts_after_being_successfully_persisted(
+ self,
+ ) -> None:
+ """
+ Test to make sure that failed pull attempts
+ (`event_failed_pull_attempts` table) for an event are cleared after the
+ event is successfully persisted.
+
+ In this test, we pretend we are processing a "pulled" event via
+ backfill. The pulled event successfully processes and the backward
+ extremities are updated, along with clearing out any failed pull attempts
+ for those old extremities.
+
+ We check that the failed pull attempts for the pulled event are
+ correctly cleared.
+ """
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(main_store.get_room_version(room_id))
+
+ # allow the remote user to send state events
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {"events_default": 0, "state_default": 0},
+ tok=tok,
+ )
+
+ # add the remote user to the room
+ member_event = self.get_success(
+ event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
+ )
+
+ initial_state_map = self.get_success(
+ main_store.get_partial_current_state_ids(room_id)
+ )
+
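+ # Build the auth events for the pulled event: the room's create event, the
+ # current power levels, and the remote user's membership event.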
+ auth_event_ids = [
+ initial_state_map[("m.room.create", "")],
+ initial_state_map[("m.room.power_levels", "")],
+ member_event.event_id,
+ ]
+
+ pulled_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [member_event.event_id],
+ "auth_events": auth_event_ids,
+ "origin_server_ts": 1,
+ "depth": 12,
+ "content": {"body": "pulled"},
+ }
+ ),
+ room_version,
+ )
+
+ # Fake the "pulled" event failing to backfill once so we can test
+ # if it's cleared out later on.
+ self.get_success(
+ main_store.record_event_failed_pull_attempt(
+ pulled_event.room_id, pulled_event.event_id, "fake cause"
+ )
+ )
+ # Make sure we have a failed pull attempt recorded for the pulled event
+ backfill_num_attempts = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ )
+ )
+ self.assertEqual(backfill_num_attempts, 1)
+
+ # The function under test: try to process the pulled event
+ with LoggingContext("test"):
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME, pulled_event, backfilled=True
+ )
+ )
+
+ # Make sure the failed pull attempts for the pulled event are cleared
+ backfill_num_attempts = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ allow_none=True,
+ )
+ )
+ self.assertIsNone(backfill_num_attempts)
+
+ # And as a sanity check, make sure the "pulled" event was persisted.
+ persisted = self.get_success(
+ main_store.get_event(pulled_event.event_id, allow_none=True)
+ )
+ self.assertIsNotNone(persisted, "pulled event was not persisted at all")
+
+ def test_backfill_signature_failure_does_not_fetch_same_prev_event_later(
+ self,
+ ) -> None:
+ """
+ Test to make sure we back off and don't try to fetch a missing prev_event when we
+ already know it has an invalid signature from checking the signatures of all of
+ the events in the backfill response.
+ """
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(main_store.get_room_version(room_id))
+
+ # Allow the remote user to send state events
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {"events_default": 0, "state_default": 0},
+ tok=tok,
+ )
+
+ # Add the remote user to the room
+ member_event = self.get_success(
+ event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
+ )
+
+ initial_state_map = self.get_success(
+ main_store.get_partial_current_state_ids(room_id)
+ )
+
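+ # Auth events shared by the two events we're about to create: the create
+ # event, the current power levels, and the remote user's membership.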
+ auth_event_ids = [
+ initial_state_map[("m.room.create", "")],
+ initial_state_map[("m.room.power_levels", "")],
+ member_event.event_id,
+ ]
+
+ # We purposely don't run `add_hashes_and_signatures_from_other_server`
+ # over this because we want the signature check to fail.
+ pulled_event_without_signatures = make_event_from_dict(
+ {
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [member_event.event_id],
+ "auth_events": auth_event_ids,
+ "origin_server_ts": 1,
+ "depth": 12,
+ "content": {"body": "pulled_event_without_signatures"},
+ },
+ room_version,
+ )
+
+ # Create a regular event that should pass, except that it references
+ # `pulled_event_without_signatures` in its `prev_events`.
+ pulled_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [
+ member_event.event_id,
+ pulled_event_without_signatures.event_id,
+ ],
+ "auth_events": auth_event_ids,
+ "origin_server_ts": 1,
+ "depth": 12,
+ "content": {"body": "pulled_event"},
+ }
+ ),
+ room_version,
+ )
+
+ # We expect an outbound request to /backfill, so stub that out
+ self.mock_federation_transport_client.backfill.return_value = make_awaitable(
+ {
+ "origin": self.OTHER_SERVER_NAME,
+ "origin_server_ts": 123,
+ "pdus": [
+ # This is one of the important aspects of this test: we include
+ # `pulled_event_without_signatures` so it fails the signature check
+ # when we filter the backfill response down to events which
+ # have valid signatures in
+ # `_check_sigs_and_hash_for_pulled_events_and_fetch`
+ pulled_event_without_signatures.get_pdu_json(),
+ # Then later, when we process this validly signed event and
+ # fetch the missing `prev_event`s, we want to make sure that we
+ # back off and don't try to fetch `pulled_event_without_signatures`
+ # again since we know it just had an invalid signature.
+ pulled_event.get_pdu_json(),
+ ],
+ }
+ )
+
+ # Keep track of the counts and make sure we don't make any of these requests
+ event_endpoint_requested_count = 0
+ room_state_ids_endpoint_requested_count = 0
+ room_state_endpoint_requested_count = 0
+
+ async def get_event(
+ destination: str, event_id: str, timeout: Optional[int] = None
+ ) -> None:
+ nonlocal event_endpoint_requested_count
+ event_endpoint_requested_count += 1
+
+ async def get_room_state_ids(
+ destination: str, room_id: str, event_id: str
+ ) -> None:
+ nonlocal room_state_ids_endpoint_requested_count
+ room_state_ids_endpoint_requested_count += 1
+
+ async def get_room_state(
+ room_version: RoomVersion, destination: str, room_id: str, event_id: str
+ ) -> None:
+ nonlocal room_state_endpoint_requested_count
+ room_state_endpoint_requested_count += 1
+
+ # We don't expect an outbound request to `/event`, `/state_ids`, or `/state` in
+ # the happy path, but stub them out so we can detect it if the logic sneaks
+ # around what we expect.
+ self.mock_federation_transport_client.get_event.side_effect = get_event
+ self.mock_federation_transport_client.get_room_state_ids.side_effect = (
+ get_room_state_ids
+ )
+ self.mock_federation_transport_client.get_room_state.side_effect = (
+ get_room_state
+ )
+
+ # The function under test: try to backfill and process the pulled event
+ with LoggingContext("test"):
+ self.get_success(
+ self.hs.get_federation_event_handler().backfill(
+ self.OTHER_SERVER_NAME,
+ room_id,
+ limit=1,
+ extremities=["$some_extremity"],
+ )
+ )
+
+ if event_endpoint_requested_count > 0:
+ self.fail(
+ "We don't expect an outbound request to /event in the happy path but if "
+ "the logic is sneaking around what we expect, make sure to fail the test. "
+ "We don't expect it because the signature failure should cause us to backoff "
+ "and not asking about pulled_event_without_signatures="
+ f"{pulled_event_without_signatures.event_id} again"
+ )
+
+ if room_state_ids_endpoint_requested_count > 0:
+ self.fail(
+ "We don't expect an outbound request to /state_ids in the happy path but if "
+ "the logic is sneaking around what we expect, make sure to fail the test. "
+ "We don't expect it because the signature failure should cause us to backoff "
+ "and not asking about pulled_event_without_signatures="
+ f"{pulled_event_without_signatures.event_id} again"
+ )
+
+ if room_state_endpoint_requested_count > 0:
+ self.fail(
+ "We don't expect an outbound request to /state in the happy path but if "
+ "the logic is sneaking around what we expect, make sure to fail the test. "
+ "We don't expect it because the signature failure should cause us to backoff "
+ "and not asking about pulled_event_without_signatures="
+ f"{pulled_event_without_signatures.event_id} again"
+ )
+
+ # Make sure we only recorded a single failure, which corresponds to the initial
+ # signature failure in `_check_sigs_and_hash_for_pulled_events_and_fetch` before
+ # we process all of the pulled events.
+ backfill_num_attempts_for_event_without_signatures = self.get_success(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event_without_signatures.event_id},
+ retcol="num_attempts",
+ )
+ )
+ self.assertEqual(backfill_num_attempts_for_event_without_signatures, 1)
+
+ # And make sure we didn't record a failure for the event that has the missing
+ # prev_event because we don't want to cause a cascade of failures. Not being
+ # able to fetch the `prev_events` just means we won't be able to de-outlier the
+ # pulled event. But we can still use an `outlier` in the state/auth chain for
+ # another event. So we shouldn't stop a downstream event from trying to pull it.
+ self.get_failure(
+ main_store.db_pool.simple_select_one_onecol(
+ table="event_failed_pull_attempts",
+ keyvalues={"event_id": pulled_event.event_id},
+ retcol="num_attempts",
+ ),
+ # StoreError: 404: No row found
+ StoreError,
+ )
+
+ def test_process_pulled_event_with_rejected_missing_state(self) -> None:
+ """Ensure that we correctly handle pulled events with missing state containing a
+ rejected state event
+
+ In this test, we pretend we are processing a "pulled" event (eg, via backfill
+ or get_missing_events). The pulled event has a prev_event we haven't previously
+ seen, so the server requests the state at that prev_event. We expect the server
+ to make a /state request.
+
+ We simulate a remote server whose /state includes a rejected kick event for a
+ local user. Notably, the kick event is rejected only because it cites a rejected
+ auth event and would otherwise be accepted based on the room state. During state
+ resolution, we re-run auth and can potentially introduce such rejected events
+ into the state if we are not careful.
+
+ We check that the pulled event is correctly persisted, and that the state
+ afterwards does not include the rejected kick.
+ """
+ # The DAG we are testing looks like:
+ #
+ # ...
+ # |
+ # v
+ # remote admin user joins
+ # | |
+ # +-------+ +-------+
+ # | |
+ # | rejected power levels
+ # | from remote server
+ # | |
+ # | v
+ # | rejected kick of local user
+ # v from remote server
+ # new power levels |
+ # | v
+ # | missing event
+ # | from remote server
+ # | |
+ # +-------+ +-------+
+ # | |
+ # v v
+ # pulled event
+ # from remote server
+ #
+ # (arrows are in the opposite direction to prev_events.)
+
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room.
+ kermit_user_id = self.register_user("kermit", "test")
+ kermit_tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(
+ room_creator=kermit_user_id, tok=kermit_tok
+ )
+ room_version = self.get_success(main_store.get_room_version(room_id))
+
+ # Add another local user to the room. This user is going to be kicked in a
+ # rejected event.
+ bert_user_id = self.register_user("bert", "test")
+ bert_tok = self.login("bert", "test")
+ self.helper.join(room_id, user=bert_user_id, tok=bert_tok)
+
+ # Allow the remote user to kick bert.
+ # The remote user is going to send a rejected power levels event later on and we
+ # need state resolution to order it before another power levels event kermit is
+ # going to send later on. Hence we give both users the same power level, so that
+ # ties are broken by `origin_server_ts`.
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {"users": {kermit_user_id: 100, OTHER_USER: 100}},
+ tok=kermit_tok,
+ )
+
+ # Add the remote user to the room.
+ other_member_event = self.get_success(
+ event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
+ )
+
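+ # Fetch the current state along with the specific events we'll need when
+ # building and auth-checking the rejected events below.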
+ initial_state_map = self.get_success(
+ main_store.get_partial_current_state_ids(room_id)
+ )
+ create_event = self.get_success(
+ main_store.get_event(initial_state_map[("m.room.create", "")])
+ )
+ bert_member_event = self.get_success(
+ main_store.get_event(initial_state_map[("m.room.member", bert_user_id)])
+ )
+ power_levels_event = self.get_success(
+ main_store.get_event(initial_state_map[("m.room.power_levels", "")])
+ )
+
+ # We now need a rejected state event that will fail
+ # `check_state_independent_auth_rules` but pass
+ # `check_state_dependent_auth_rules`.
+
+ # First, we create a power levels event that we pretend the remote server has
+ # accepted, but the local homeserver will reject.
+ next_depth = 100
+ next_timestamp = other_member_event.origin_server_ts + 100
+ rejected_power_levels_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "m.room.power_levels",
+ "state_key": "",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [other_member_event.event_id],
+ "auth_events": [
+ initial_state_map[("m.room.create", "")],
+ initial_state_map[("m.room.power_levels", "")],
+ # The event will be rejected because of the duplicated auth
+ # event.
+ other_member_event.event_id,
+ other_member_event.event_id,
+ ],
+ "origin_server_ts": next_timestamp,
+ "depth": next_depth,
+ "content": power_levels_event.content,
+ }
+ ),
+ room_version,
+ )
+ next_depth += 1
+ next_timestamp += 100
+
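+ # Pull in the power levels event; it should be persisted, but marked as rejected.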
+ with LoggingContext("send_rejected_power_levels_event"):
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME,
+ rejected_power_levels_event,
+ backfilled=False,
+ )
+ )
+ self.assertEqual(
+ self.get_success(
+ main_store.get_rejection_reason(
+ rejected_power_levels_event.event_id
+ )
+ ),
+ "auth_error",
+ )
+
+ # Then we create a kick event for a local user that cites the rejected power
+ # levels event in its auth events. The kick event will be rejected solely
+ # because of the rejected auth event and would otherwise be accepted.
+ rejected_kick_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "m.room.member",
+ "state_key": bert_user_id,
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [rejected_power_levels_event.event_id],
+ "auth_events": [
+ initial_state_map[("m.room.create", "")],
+ rejected_power_levels_event.event_id,
+ initial_state_map[("m.room.member", bert_user_id)],
+ initial_state_map[("m.room.member", OTHER_USER)],
+ ],
+ "origin_server_ts": next_timestamp,
+ "depth": next_depth,
+ "content": {"membership": "leave"},
+ }
+ ),
+ room_version,
+ )
+ next_depth += 1
+ next_timestamp += 100
+
+ # The kick event must fail the state-independent auth rules, but pass the
+ # state-dependent auth rules, so that it has a chance of making it through state
+ # resolution.
+ self.get_failure(
+ check_state_independent_auth_rules(main_store, rejected_kick_event),
+ AuthError,
+ )
+ check_state_dependent_auth_rules(
+ rejected_kick_event,
+ [create_event, power_levels_event, other_member_event, bert_member_event],
+ )
+
+ # The kick event must also win over the original member event during state
+ # resolution.
+ self.assertEqual(
+ self.get_success(
+ _mainline_sort(
+ self.clock,
+ room_id,
+ event_ids=[
+ bert_member_event.event_id,
+ rejected_kick_event.event_id,
+ ],
+ resolved_power_event_id=power_levels_event.event_id,
+ event_map={
+ bert_member_event.event_id: bert_member_event,
+ rejected_kick_event.event_id: rejected_kick_event,
+ },
+ state_res_store=main_store,
+ )
+ ),
+ [bert_member_event.event_id, rejected_kick_event.event_id],
+ "The rejected kick event will not be applied after bert's join event "
+ "during state resolution. The test setup is incorrect.",
+ )
+
+ with LoggingContext("send_rejected_kick_event"):
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME, rejected_kick_event, backfilled=False
+ )
+ )
+ self.assertEqual(
+ self.get_success(
+ main_store.get_rejection_reason(rejected_kick_event.event_id)
+ ),
+ "auth_error",
+ )
+
+ # We need another power levels event which will win over the rejected one during
+ # state resolution, otherwise we hit other issues where we end up with a
+ # rejected power levels event during state resolution.
+ self.reactor.advance(100) # ensure the `origin_server_ts` is larger
+ new_power_levels_event = self.get_success(
+ main_store.get_event(
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {"users": {kermit_user_id: 100, OTHER_USER: 100, bert_user_id: 1}},
+ tok=kermit_tok,
+ )["event_id"]
+ )
+ )
+ self.assertEqual(
+ self.get_success(
+ _reverse_topological_power_sort(
+ self.clock,
+ room_id,
+ event_ids=[
+ new_power_levels_event.event_id,
+ rejected_power_levels_event.event_id,
+ ],
+ event_map={},
+ state_res_store=main_store,
+ full_conflicted_set=set(),
+ )
+ ),
+ [rejected_power_levels_event.event_id, new_power_levels_event.event_id],
+ "The power levels events will not have the desired ordering during state "
+ "resolution. The test setup is incorrect.",
+ )
+
+ # Create a missing event, so that the local homeserver has to do a `/state` or
+ # `/state_ids` request to pull state from the remote homeserver.
+ missing_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "m.room.message",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [rejected_kick_event.event_id],
+ "auth_events": [
+ initial_state_map[("m.room.create", "")],
+ initial_state_map[("m.room.power_levels", "")],
+ initial_state_map[("m.room.member", OTHER_USER)],
+ ],
+ "origin_server_ts": next_timestamp,
+ "depth": next_depth,
+ "content": {"msgtype": "m.text", "body": "foo"},
+ }
+ ),
+ room_version,
+ )
+ next_depth += 1
+ next_timestamp += 100
+
+ # The pulled event has two prev events, one of which is missing. We will make a
+ # `/state` or `/state_ids` request to the remote homeserver to ask it for the
+ # state before the missing prev event.
+ pulled_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "m.room.message",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [
+ new_power_levels_event.event_id,
+ missing_event.event_id,
+ ],
+ "auth_events": [
+ initial_state_map[("m.room.create", "")],
+ new_power_levels_event.event_id,
+ initial_state_map[("m.room.member", OTHER_USER)],
+ ],
+ "origin_server_ts": next_timestamp,
+ "depth": next_depth,
+ "content": {"msgtype": "m.text", "body": "bar"},
+ }
+ ),
+ room_version,
+ )
+ next_depth += 1
+ next_timestamp += 100
+
+ # Prepare the response for the `/state` or `/state_ids` request.
+ # The remote server believes bert has been kicked, while the local server does
+ # not.
+ state_before_missing_event = self.get_success(
+ main_store.get_events_as_list(initial_state_map.values())
+ )
+ state_before_missing_event = [
+ event
+ for event in state_before_missing_event
+ if event.event_id != bert_member_event.event_id
+ ]
+ state_before_missing_event.append(rejected_kick_event)
+
+ # We have to bump the clock a bit, to keep the retry logic in
+ # `FederationClient.get_pdu` happy
+ self.reactor.advance(60000)
+ with LoggingContext("send_pulled_event"):
+
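+ # Stub out the federation requests the local homeserver will make while
+ # fetching the missing prev event and the state before it.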
+ async def get_event(
+ destination: str, event_id: str, timeout: Optional[int] = None
+ ) -> JsonDict:
+ self.assertEqual(destination, self.OTHER_SERVER_NAME)
+ self.assertEqual(event_id, missing_event.event_id)
+ return {"pdus": [missing_event.get_pdu_json()]}
+
+ async def get_room_state_ids(
+ destination: str, room_id: str, event_id: str
+ ) -> JsonDict:
+ self.assertEqual(destination, self.OTHER_SERVER_NAME)
+ self.assertEqual(event_id, missing_event.event_id)
+ return {
+ "pdu_ids": [event.event_id for event in state_before_missing_event],
+ "auth_chain_ids": [],
+ }
+
+ async def get_room_state(
+ room_version: RoomVersion, destination: str, room_id: str, event_id: str
+ ) -> StateRequestResponse:
+ self.assertEqual(destination, self.OTHER_SERVER_NAME)
+ self.assertEqual(event_id, missing_event.event_id)
+ return StateRequestResponse(
+ state=state_before_missing_event,
+ auth_events=[],
+ )
+
+ self.mock_federation_transport_client.get_event.side_effect = get_event
+ self.mock_federation_transport_client.get_room_state_ids.side_effect = (
+ get_room_state_ids
+ )
+ self.mock_federation_transport_client.get_room_state.side_effect = (
+ get_room_state
+ )
+
+ self.get_success(
+ self.hs.get_federation_event_handler()._process_pulled_event(
+ self.OTHER_SERVER_NAME, pulled_event, backfilled=False
+ )
+ )
+ self.assertIsNone(
+ self.get_success(
+ main_store.get_rejection_reason(pulled_event.event_id)
+ ),
+ "Pulled event was unexpectedly rejected, likely due to a problem with "
+ "the test setup.",
+ )
+ self.assertEqual(
+ {pulled_event.event_id},
+ self.get_success(
+ main_store.have_events_in_timeline([pulled_event.event_id])
+ ),
+ "Pulled event was not persisted, likely due to a problem with the test "
+ "setup.",
+ )
+
+ # We must not accept rejected events into the room state, so we expect bert
+ # to not be kicked, even if the remote server believes so.
+ new_state_map = self.get_success(
+ main_store.get_partial_current_state_ids(room_id)
+ )
+ self.assertEqual(
+ new_state_map[("m.room.member", bert_user_id)],
+ bert_member_event.event_id,
+ "Rejected kick event unexpectedly became part of room state.",
+ )
diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py
index 44da96c792..99384837d0 100644
--- a/tests/handlers/test_message.py
+++ b/tests/handlers/test_message.py
@@ -105,7 +105,10 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
event1, context = self._create_duplicate_event(txn_id)
ret_event1 = self.get_success(
- self.handler.handle_new_client_event(self.requester, event1, context)
+ self.handler.handle_new_client_event(
+ self.requester,
+ events_and_context=[(event1, context)],
+ )
)
stream_id1 = ret_event1.internal_metadata.stream_ordering
@@ -118,7 +121,10 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
self.assertNotEqual(event1.event_id, event2.event_id)
ret_event2 = self.get_success(
- self.handler.handle_new_client_event(self.requester, event2, context)
+ self.handler.handle_new_client_event(
+ self.requester,
+ events_and_context=[(event2, context)],
+ )
)
stream_id2 = ret_event2.internal_metadata.stream_ordering
@@ -314,4 +320,4 @@ class ServerAclValidationTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST", path, content={}, access_token=self.access_token
)
- self.assertEqual(int(channel.result["code"]), 403)
+ self.assertEqual(channel.code, 403)
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index e6cd3af7b7..5955410524 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -11,9 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import json
import os
-from typing import Any, Dict
+from typing import Any, Dict, Tuple
from unittest.mock import ANY, Mock, patch
from urllib.parse import parse_qs, urlparse
@@ -22,12 +21,15 @@ import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
from synapse.handlers.sso import MappingException
+from synapse.http.site import SynapseRequest
from synapse.server import HomeServer
-from synapse.types import JsonDict, UserID
+from synapse.types import UserID
from synapse.util import Clock
-from synapse.util.macaroons import OidcSessionData, get_value_from_macaroon
+from synapse.util.macaroons import get_value_from_macaroon
+from synapse.util.stringutils import random_string
from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock
+from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer
from tests.unittest import HomeserverTestCase, override_config
try:
@@ -46,12 +48,6 @@ BASE_URL = "https://synapse/"
CALLBACK_URL = BASE_URL + "_synapse/client/oidc/callback"
SCOPES = ["openid"]
-AUTHORIZATION_ENDPOINT = ISSUER + "authorize"
-TOKEN_ENDPOINT = ISSUER + "token"
-USERINFO_ENDPOINT = ISSUER + "userinfo"
-WELL_KNOWN = ISSUER + ".well-known/openid-configuration"
-JWKS_URI = ISSUER + ".well-known/jwks.json"
-
# config for common cases
DEFAULT_CONFIG = {
"enabled": True,
@@ -66,9 +62,9 @@ DEFAULT_CONFIG = {
EXPLICIT_ENDPOINT_CONFIG = {
**DEFAULT_CONFIG,
"discover": False,
- "authorization_endpoint": AUTHORIZATION_ENDPOINT,
- "token_endpoint": TOKEN_ENDPOINT,
- "jwks_uri": JWKS_URI,
+ "authorization_endpoint": ISSUER + "authorize",
+ "token_endpoint": ISSUER + "token",
+ "jwks_uri": ISSUER + "jwks",
}
@@ -102,27 +98,6 @@ class TestMappingProviderFailures(TestMappingProvider):
}
-async def get_json(url: str) -> JsonDict:
- # Mock get_json calls to handle jwks & oidc discovery endpoints
- if url == WELL_KNOWN:
- # Minimal discovery document, as defined in OpenID.Discovery
- # https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
- return {
- "issuer": ISSUER,
- "authorization_endpoint": AUTHORIZATION_ENDPOINT,
- "token_endpoint": TOKEN_ENDPOINT,
- "jwks_uri": JWKS_URI,
- "userinfo_endpoint": USERINFO_ENDPOINT,
- "response_types_supported": ["code"],
- "subject_types_supported": ["public"],
- "id_token_signing_alg_values_supported": ["RS256"],
- }
- elif url == JWKS_URI:
- return {"keys": []}
-
- return {}
-
-
def _key_file_path() -> str:
"""path to a file containing the private half of a test key"""
@@ -159,11 +134,11 @@ class OidcHandlerTestCase(HomeserverTestCase):
return config
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- self.http_client = Mock(spec=["get_json"])
- self.http_client.get_json.side_effect = get_json
- self.http_client.user_agent = b"Synapse Test"
+ self.fake_server = FakeOidcServer(clock=clock, issuer=ISSUER)
- hs = self.setup_test_homeserver(proxied_http_client=self.http_client)
+ hs = self.setup_test_homeserver()
+ self.hs_patcher = self.fake_server.patch_homeserver(hs=hs)
+ self.hs_patcher.start()
self.handler = hs.get_oidc_handler()
self.provider = self.handler._providers["oidc"]
@@ -175,18 +150,51 @@ class OidcHandlerTestCase(HomeserverTestCase):
# Reduce the number of attempts when generating MXIDs.
sso_handler._MAP_USERNAME_RETRIES = 3
+ auth_handler = hs.get_auth_handler()
+ # Mock the complete SSO login method.
+ self.complete_sso_login = simple_async_mock()
+ auth_handler.complete_sso_login = self.complete_sso_login # type: ignore[assignment]
+
return hs
+ def tearDown(self) -> None:
+ self.hs_patcher.stop()
+ return super().tearDown()
+
+ def reset_mocks(self):
+ """Reset all the Mocks."""
+ self.fake_server.reset_mocks()
+ self.render_error.reset_mock()
+ self.complete_sso_login.reset_mock()
+
def metadata_edit(self, values):
"""Modify the result that will be returned by the well-known query"""
- async def patched_get_json(uri):
- res = await get_json(uri)
- if uri == WELL_KNOWN:
- res.update(values)
- return res
+ metadata = self.fake_server.get_metadata()
+ metadata.update(values)
+ return patch.object(self.fake_server, "get_metadata", return_value=metadata)
- return patch.object(self.http_client, "get_json", patched_get_json)
+ def start_authorization(
+ self,
+ userinfo: dict,
+ client_redirect_url: str = "http://client/redirect",
+ scope: str = "openid",
+ with_sid: bool = False,
+ ) -> Tuple[SynapseRequest, FakeAuthorizationGrant]:
+ """Start an authorization request, and get the callback request back."""
+ nonce = random_string(10)
+ state = random_string(10)
+
+ code, grant = self.fake_server.start_authorization(
+ userinfo=userinfo,
+ scope=scope,
+ client_id=self.provider._client_auth.client_id,
+ redirect_uri=self.provider._callback_url,
+ nonce=nonce,
+ with_sid=with_sid,
+ )
+ session = self._generate_oidc_session_token(state, nonce, client_redirect_url)
+ return _build_callback_request(code, state, session), grant
def assertRenderedError(self, error, error_description=None):
self.render_error.assert_called_once()
@@ -210,52 +218,54 @@ class OidcHandlerTestCase(HomeserverTestCase):
"""The handler should discover the endpoints from OIDC discovery document."""
# This would throw if some metadata were invalid
metadata = self.get_success(self.provider.load_metadata())
- self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.fake_server.get_metadata_handler.assert_called_once()
- self.assertEqual(metadata.issuer, ISSUER)
- self.assertEqual(metadata.authorization_endpoint, AUTHORIZATION_ENDPOINT)
- self.assertEqual(metadata.token_endpoint, TOKEN_ENDPOINT)
- self.assertEqual(metadata.jwks_uri, JWKS_URI)
- # FIXME: it seems like authlib does not have that defined in its metadata models
- # self.assertEqual(metadata.userinfo_endpoint, USERINFO_ENDPOINT)
+ self.assertEqual(metadata.issuer, self.fake_server.issuer)
+ self.assertEqual(
+ metadata.authorization_endpoint,
+ self.fake_server.authorization_endpoint,
+ )
+ self.assertEqual(metadata.token_endpoint, self.fake_server.token_endpoint)
+ self.assertEqual(metadata.jwks_uri, self.fake_server.jwks_uri)
+ # It seems like authlib does not have that defined in its metadata models
+ self.assertEqual(
+ metadata.get("userinfo_endpoint"),
+ self.fake_server.userinfo_endpoint,
+ )
# subsequent calls should be cached
- self.http_client.reset_mock()
+ self.reset_mocks()
self.get_success(self.provider.load_metadata())
- self.http_client.get_json.assert_not_called()
+ self.fake_server.get_metadata_handler.assert_not_called()
@override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG})
def test_no_discovery(self) -> None:
"""When discovery is disabled, it should not try to load from discovery document."""
self.get_success(self.provider.load_metadata())
- self.http_client.get_json.assert_not_called()
+ self.fake_server.get_metadata_handler.assert_not_called()
- @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG})
+ @override_config({"oidc_config": DEFAULT_CONFIG})
def test_load_jwks(self) -> None:
"""JWKS loading is done once (then cached) if used."""
jwks = self.get_success(self.provider.load_jwks())
- self.http_client.get_json.assert_called_once_with(JWKS_URI)
- self.assertEqual(jwks, {"keys": []})
+ self.fake_server.get_jwks_handler.assert_called_once()
+ self.assertEqual(jwks, self.fake_server.get_jwks())
# subsequent calls should be cached…
- self.http_client.reset_mock()
+ self.reset_mocks()
self.get_success(self.provider.load_jwks())
- self.http_client.get_json.assert_not_called()
+ self.fake_server.get_jwks_handler.assert_not_called()
# …unless forced
- self.http_client.reset_mock()
+ self.reset_mocks()
self.get_success(self.provider.load_jwks(force=True))
- self.http_client.get_json.assert_called_once_with(JWKS_URI)
+ self.fake_server.get_jwks_handler.assert_called_once()
- # Throw if the JWKS uri is missing
- original = self.provider.load_metadata
-
- async def patched_load_metadata():
- m = (await original()).copy()
- m.update({"jwks_uri": None})
- return m
-
- with patch.object(self.provider, "load_metadata", patched_load_metadata):
+ with self.metadata_edit({"jwks_uri": None}):
+ # If we don't do this, the load_metadata call will throw because of the
+ # missing jwks_uri
+ self.provider._user_profile_method = "userinfo_endpoint"
+ self.get_success(self.provider.load_metadata(force=True))
self.get_failure(self.provider.load_jwks(force=True), RuntimeError)
@override_config({"oidc_config": DEFAULT_CONFIG})
@@ -359,7 +369,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.provider.handle_redirect_request(req, b"http://client/redirect")
)
)
- auth_endpoint = urlparse(AUTHORIZATION_ENDPOINT)
+ auth_endpoint = urlparse(self.fake_server.authorization_endpoint)
self.assertEqual(url.scheme, auth_endpoint.scheme)
self.assertEqual(url.netloc, auth_endpoint.netloc)
@@ -424,48 +434,34 @@ class OidcHandlerTestCase(HomeserverTestCase):
with self.assertRaises(AttributeError):
_ = mapping_provider.get_extra_attributes
- token = {
- "type": "bearer",
- "id_token": "id_token",
- "access_token": "access_token",
- }
username = "bar"
userinfo = {
"sub": "foo",
"username": username,
}
expected_user_id = "@%s:%s" % (username, self.hs.hostname)
- self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment]
- self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment]
- self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment]
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
- code = "code"
- state = "state"
- nonce = "nonce"
client_redirect_url = "http://client/redirect"
- ip_address = "10.0.0.1"
- session = self._generate_oidc_session_token(state, nonce, client_redirect_url)
- request = _build_callback_request(code, state, session, ip_address=ip_address)
-
+ request, _ = self.start_authorization(
+ userinfo, client_redirect_url=client_redirect_url
+ )
self.get_success(self.handler.handle_oidc_callback(request))
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
expected_user_id,
- "oidc",
+ self.provider.idp_id,
request,
client_redirect_url,
None,
new_user=True,
auth_provider_session_id=None,
)
- self.provider._exchange_code.assert_called_once_with(code)
- self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)
- self.provider._fetch_userinfo.assert_not_called()
+ self.fake_server.post_token_handler.assert_called_once()
+ self.fake_server.get_userinfo_handler.assert_not_called()
self.render_error.assert_not_called()
# Handle mapping errors
+ request, _ = self.start_authorization(userinfo)
with patch.object(
self.provider,
"_remote_id_from_userinfo",
@@ -475,81 +471,63 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertRenderedError("mapping_error")
# Handle ID token errors
- self.provider._parse_id_token = simple_async_mock(raises=Exception()) # type: ignore[assignment]
- self.get_success(self.handler.handle_oidc_callback(request))
+ request, _ = self.start_authorization(userinfo)
+ with self.fake_server.id_token_override({"iss": "https://bad.issuer/"}):
+ self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("invalid_token")
- auth_handler.complete_sso_login.reset_mock()
- self.provider._exchange_code.reset_mock()
- self.provider._parse_id_token.reset_mock()
- self.provider._fetch_userinfo.reset_mock()
+ self.reset_mocks()
# With userinfo fetching
self.provider._user_profile_method = "userinfo_endpoint"
- token = {
- "type": "bearer",
- "access_token": "access_token",
- }
- self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment]
+ # Without the "openid" scope, the FakeProvider does not generate an id_token
+ request, _ = self.start_authorization(userinfo, scope="")
self.get_success(self.handler.handle_oidc_callback(request))
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
expected_user_id,
- "oidc",
+ self.provider.idp_id,
request,
- client_redirect_url,
+ ANY,
None,
new_user=False,
auth_provider_session_id=None,
)
- self.provider._exchange_code.assert_called_once_with(code)
- self.provider._parse_id_token.assert_not_called()
- self.provider._fetch_userinfo.assert_called_once_with(token)
+ self.fake_server.post_token_handler.assert_called_once()
+ self.fake_server.get_userinfo_handler.assert_called_once()
self.render_error.assert_not_called()
+ self.reset_mocks()
+
# With an ID token, userinfo fetching and sid in the ID token
self.provider._user_profile_method = "userinfo_endpoint"
- token = {
- "type": "bearer",
- "access_token": "access_token",
- "id_token": "id_token",
- }
- id_token = {
- "sid": "abcdefgh",
- }
- self.provider._parse_id_token = simple_async_mock(return_value=id_token) # type: ignore[assignment]
- self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment]
- auth_handler.complete_sso_login.reset_mock()
- self.provider._fetch_userinfo.reset_mock()
+ request, grant = self.start_authorization(userinfo, with_sid=True)
+ self.assertIsNotNone(grant.sid)
self.get_success(self.handler.handle_oidc_callback(request))
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
expected_user_id,
- "oidc",
+ self.provider.idp_id,
request,
- client_redirect_url,
+ ANY,
None,
new_user=False,
- auth_provider_session_id=id_token["sid"],
+ auth_provider_session_id=grant.sid,
)
- self.provider._exchange_code.assert_called_once_with(code)
- self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)
- self.provider._fetch_userinfo.assert_called_once_with(token)
+ self.fake_server.post_token_handler.assert_called_once()
+ self.fake_server.get_userinfo_handler.assert_called_once()
self.render_error.assert_not_called()
# Handle userinfo fetching error
- self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) # type: ignore[assignment]
- self.get_success(self.handler.handle_oidc_callback(request))
+ request, _ = self.start_authorization(userinfo)
+ with self.fake_server.buggy_endpoint(userinfo=True):
+ self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("fetch_error")
- # Handle code exchange failure
- from synapse.handlers.oidc import OidcError
-
- self.provider._exchange_code = simple_async_mock( # type: ignore[assignment]
- raises=OidcError("invalid_request")
- )
- self.get_success(self.handler.handle_oidc_callback(request))
- self.assertRenderedError("invalid_request")
+ request, _ = self.start_authorization(userinfo)
+ with self.fake_server.buggy_endpoint(token=True):
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.assertRenderedError("server_error")
@override_config({"oidc_config": DEFAULT_CONFIG})
def test_callback_session(self) -> None:
@@ -599,18 +577,22 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
def test_exchange_code(self) -> None:
"""Code exchange behaves correctly and handles various error scenarios."""
- token = {"type": "bearer"}
- token_json = json.dumps(token).encode("utf-8")
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(code=200, phrase=b"OK", body=token_json)
+ token = {
+ "type": "Bearer",
+ "access_token": "aabbcc",
+ }
+
+ self.fake_server.post_token_handler.side_effect = None
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ payload=token
)
code = "code"
ret = self.get_success(self.provider._exchange_code(code))
- kwargs = self.http_client.request.call_args[1]
+ kwargs = self.fake_server.request.call_args[1]
self.assertEqual(ret, token)
self.assertEqual(kwargs["method"], "POST")
- self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT)
+ self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint)
args = parse_qs(kwargs["data"].decode("utf-8"))
self.assertEqual(args["grant_type"], ["authorization_code"])
@@ -620,12 +602,8 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertEqual(args["redirect_uri"], [CALLBACK_URL])
# Test error handling
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=400,
- phrase=b"Bad Request",
- body=b'{"error": "foo", "error_description": "bar"}',
- )
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ code=400, payload={"error": "foo", "error_description": "bar"}
)
from synapse.handlers.oidc import OidcError
@@ -634,46 +612,30 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertEqual(exc.value.error_description, "bar")
# Internal server error with no JSON body
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=500,
- phrase=b"Internal Server Error",
- body=b"Not JSON",
- )
+ self.fake_server.post_token_handler.return_value = FakeResponse(
+ code=500, body=b"Not JSON"
)
exc = self.get_failure(self.provider._exchange_code(code), OidcError)
self.assertEqual(exc.value.error, "server_error")
# Internal server error with JSON body
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=500,
- phrase=b"Internal Server Error",
- body=b'{"error": "internal_server_error"}',
- )
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ code=500, payload={"error": "internal_server_error"}
)
exc = self.get_failure(self.provider._exchange_code(code), OidcError)
self.assertEqual(exc.value.error, "internal_server_error")
# 4xx error without "error" field
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=400,
- phrase=b"Bad request",
- body=b"{}",
- )
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ code=400, payload={}
)
exc = self.get_failure(self.provider._exchange_code(code), OidcError)
self.assertEqual(exc.value.error, "server_error")
# 2xx error with "error" field
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=200,
- phrase=b"OK",
- body=b'{"error": "some_error"}',
- )
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ code=200, payload={"error": "some_error"}
)
exc = self.get_failure(self.provider._exchange_code(code), OidcError)
self.assertEqual(exc.value.error, "some_error")
@@ -697,11 +659,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
"""Test that code exchange works with a JWK client secret."""
from authlib.jose import jwt
- token = {"type": "bearer"}
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=200, phrase=b"OK", body=json.dumps(token).encode("utf-8")
- )
+ token = {
+ "type": "Bearer",
+ "access_token": "aabbcc",
+ }
+
+ self.fake_server.post_token_handler.side_effect = None
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ payload=token
)
code = "code"
@@ -714,9 +679,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertEqual(ret, token)
# the request should have hit the token endpoint
- kwargs = self.http_client.request.call_args[1]
+ kwargs = self.fake_server.request.call_args[1]
self.assertEqual(kwargs["method"], "POST")
- self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT)
+ self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint)
# the client secret provided to the should be a jwt which can be checked with
# the public key
@@ -750,11 +715,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
def test_exchange_code_no_auth(self) -> None:
"""Test that code exchange works with no client secret."""
- token = {"type": "bearer"}
- self.http_client.request = simple_async_mock(
- return_value=FakeResponse(
- code=200, phrase=b"OK", body=json.dumps(token).encode("utf-8")
- )
+ token = {
+ "type": "Bearer",
+ "access_token": "aabbcc",
+ }
+
+ self.fake_server.post_token_handler.side_effect = None
+ self.fake_server.post_token_handler.return_value = FakeResponse.json(
+ payload=token
)
code = "code"
ret = self.get_success(self.provider._exchange_code(code))
@@ -762,9 +730,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
self.assertEqual(ret, token)
# the request should have hit the token endpoint
- kwargs = self.http_client.request.call_args[1]
+ kwargs = self.fake_server.request.call_args[1]
self.assertEqual(kwargs["method"], "POST")
- self.assertEqual(kwargs["uri"], TOKEN_ENDPOINT)
+ self.assertEqual(kwargs["uri"], self.fake_server.token_endpoint)
# check the POSTed data
args = parse_qs(kwargs["data"].decode("utf-8"))
@@ -787,37 +755,19 @@ class OidcHandlerTestCase(HomeserverTestCase):
"""
Login while using a mapping provider that implements get_extra_attributes.
"""
- token = {
- "type": "bearer",
- "id_token": "id_token",
- "access_token": "access_token",
- }
userinfo = {
"sub": "foo",
"username": "foo",
"phone": "1234567",
}
- self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment]
- self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment]
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
-
- state = "state"
- client_redirect_url = "http://client/redirect"
- session = self._generate_oidc_session_token(
- state=state,
- nonce="nonce",
- client_redirect_url=client_redirect_url,
- )
- request = _build_callback_request("code", state, session)
-
+ request, _ = self.start_authorization(userinfo)
self.get_success(self.handler.handle_oidc_callback(request))
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
"@foo:test",
- "oidc",
+ self.provider.idp_id,
request,
- client_redirect_url,
+ ANY,
{"phone": "1234567"},
new_user=True,
auth_provider_session_id=None,
@@ -826,41 +776,40 @@ class OidcHandlerTestCase(HomeserverTestCase):
@override_config({"oidc_config": DEFAULT_CONFIG})
def test_map_userinfo_to_user(self) -> None:
"""Ensure that mapping the userinfo returned from a provider to an MXID works properly."""
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
-
userinfo: dict = {
"sub": "test_user",
"username": "test_user",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
"@test_user:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=True,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Some providers return an integer ID.
userinfo = {
"sub": 1234,
"username": "test_user_2",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
"@test_user_2:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=True,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Test if the mxid is already taken
store = self.hs.get_datastores().main
@@ -869,8 +818,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
store.register_user(user_id=user3.to_string(), password_hash=None)
)
userinfo = {"sub": "test3", "username": "test_user_3"}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
self.assertRenderedError(
"mapping_error",
"Mapping provider does not support de-duplicating Matrix IDs",
@@ -885,38 +835,37 @@ class OidcHandlerTestCase(HomeserverTestCase):
store.register_user(user_id=user.to_string(), password_hash=None)
)
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
-
# Map a user via SSO.
userinfo = {
"sub": "test",
"username": "test_user",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
user.to_string(),
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=False,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Subsequent calls should map to the same mxid.
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
user.to_string(),
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=False,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Note that a second SSO user can be mapped to the same Matrix ID. (This
# requires a unique sub, but something that maps to the same matrix ID,
@@ -927,17 +876,18 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "test1",
"username": "test_user",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
user.to_string(),
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=False,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Register some non-exact matching cases.
user2 = UserID.from_string("@TEST_user_2:test")
@@ -954,8 +904,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "test2",
"username": "TEST_USER_2",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
args = self.assertRenderedError("mapping_error")
self.assertTrue(
args[2].startswith(
@@ -969,11 +920,12 @@ class OidcHandlerTestCase(HomeserverTestCase):
store.register_user(user_id=user2.to_string(), password_hash=None)
)
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_called_once_with(
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_called_once_with(
"@TEST_USER_2:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=False,
@@ -983,9 +935,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
@override_config({"oidc_config": DEFAULT_CONFIG})
def test_map_userinfo_to_invalid_localpart(self) -> None:
"""If the mapping provider generates an invalid localpart it should be rejected."""
- self.get_success(
- _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"})
- )
+ userinfo = {"sub": "test2", "username": "föö"}
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("mapping_error", "localpart is invalid: föö")
@override_config(
@@ -1000,9 +952,6 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
def test_map_userinfo_to_user_retries(self) -> None:
"""The mapping provider can retry generating an MXID if the MXID is already in use."""
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
-
store = self.hs.get_datastores().main
self.get_success(
store.register_user(user_id="@test_user:test", password_hash=None)
@@ -1011,19 +960,20 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "test",
"username": "test_user",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
# test_user is already taken, so test_user1 gets registered instead.
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
"@test_user1:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=True,
auth_provider_session_id=None,
)
- auth_handler.complete_sso_login.reset_mock()
+ self.reset_mocks()
# Register all of the potential mxids for a particular OIDC username.
self.get_success(
@@ -1039,8 +989,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "tester",
"username": "tester",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
self.assertRenderedError(
"mapping_error", "Unable to generate a Matrix ID from the SSO response"
)
@@ -1052,7 +1003,8 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "tester",
"username": "",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("mapping_error", "localpart is invalid: ")
@override_config(
@@ -1071,7 +1023,8 @@ class OidcHandlerTestCase(HomeserverTestCase):
"sub": "tester",
"username": None,
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("mapping_error", "localpart is invalid: ")
@override_config(
@@ -1084,16 +1037,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
def test_attribute_requirements(self) -> None:
"""The required attributes must be met from the OIDC userinfo response."""
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
-
# userinfo lacking "test": "foobar" attribute should fail.
userinfo = {
"sub": "tester",
"username": "tester",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": "foobar" attribute should succeed.
userinfo = {
@@ -1101,13 +1052,14 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": "foobar",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
# check that the auth handler got called as expected
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
"@tester:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=True,
@@ -1124,21 +1076,20 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
def test_attribute_requirements_contains(self) -> None:
"""Test that auth succeeds if userinfo attribute CONTAINS required value"""
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
# userinfo with "test": ["foobar", "foo", "bar"] attribute should succeed.
userinfo = {
"sub": "tester",
"username": "tester",
"test": ["foobar", "foo", "bar"],
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
# check that the auth handler got called as expected
- auth_handler.complete_sso_login.assert_called_once_with(
+ self.complete_sso_login.assert_called_once_with(
"@tester:test",
- "oidc",
- ANY,
+ self.provider.idp_id,
+ request,
ANY,
None,
new_user=True,
@@ -1158,16 +1109,15 @@ class OidcHandlerTestCase(HomeserverTestCase):
Test that auth fails if attributes exist but don't match,
or are non-string values.
"""
- auth_handler = self.hs.get_auth_handler()
- auth_handler.complete_sso_login = simple_async_mock()
# userinfo with "test": "not_foobar" attribute should fail
userinfo: dict = {
"sub": "tester",
"username": "tester",
"test": "not_foobar",
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": ["foo", "bar"] attribute should fail
userinfo = {
@@ -1175,8 +1125,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": ["foo", "bar"],
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": False attribute should fail
# this is largely just to ensure we don't crash here
@@ -1185,8 +1136,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": False,
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": None attribute should fail
# a value of None breaks the OIDC spec, but it's important to not crash here
@@ -1195,8 +1147,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": None,
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": 1 attribute should fail
# this is largely just to ensure we don't crash here
@@ -1205,8 +1158,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": 1,
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
# userinfo with "test": 3.14 attribute should fail
# this is largely just to ensure we don't crash here
@@ -1215,8 +1169,9 @@ class OidcHandlerTestCase(HomeserverTestCase):
"username": "tester",
"test": 3.14,
}
- self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
- auth_handler.complete_sso_login.assert_not_called()
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
def _generate_oidc_session_token(
self,
@@ -1230,7 +1185,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
return self.handler._macaroon_generator.generate_oidc_session_token(
state=state,
session_data=OidcSessionData(
- idp_id="oidc",
+ idp_id=self.provider.idp_id,
nonce=nonce,
client_redirect_url=client_redirect_url,
ui_auth_session_id=ui_auth_session_id,
@@ -1238,41 +1193,6 @@ class OidcHandlerTestCase(HomeserverTestCase):
)
-async def _make_callback_with_userinfo(
- hs: HomeServer, userinfo: dict, client_redirect_url: str = "http://client/redirect"
-) -> None:
- """Mock up an OIDC callback with the given userinfo dict
-
- We'll pull out the OIDC handler from the homeserver, stub out a couple of methods,
- and poke in the userinfo dict as if it were the response to an OIDC userinfo call.
-
- Args:
- hs: the HomeServer impl to send the callback to.
- userinfo: the OIDC userinfo dict
- client_redirect_url: the URL to redirect to on success.
- """
-
- handler = hs.get_oidc_handler()
- provider = handler._providers["oidc"]
- provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) # type: ignore[assignment]
- provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment]
- provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment]
-
- state = "state"
- session = handler._macaroon_generator.generate_oidc_session_token(
- state=state,
- session_data=OidcSessionData(
- idp_id="oidc",
- nonce="nonce",
- client_redirect_url=client_redirect_url,
- ui_auth_session_id="",
- ),
- )
- request = _build_callback_request("code", state, session)
-
- await handler.handle_oidc_callback(request)
-
-
def _build_callback_request(
code: str,
state: str,
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index 4c62449c89..75934b1707 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -21,7 +21,6 @@ from unittest.mock import Mock
import synapse
from synapse.api.constants import LoginType
from synapse.api.errors import Codes
-from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.module_api import ModuleApi
from synapse.rest.client import account, devices, login, logout, register
from synapse.types import JsonDict, UserID
@@ -167,16 +166,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
mock_password_provider.reset_mock()
super().setUp()
- def make_homeserver(self, reactor, clock):
- hs = self.setup_test_homeserver()
- # Load the modules into the homeserver
- module_api = hs.get_module_api()
- for module, config in hs.config.modules.loaded_modules:
- module(config=config, api=module_api)
- load_legacy_password_auth_providers(hs)
-
- return hs
-
@override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider))
def test_password_only_auth_progiver_login_legacy(self):
self.password_only_auth_provider_login_test_body()
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index c96dc6caf2..c5981ff965 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -15,6 +15,7 @@
from typing import Optional
from unittest.mock import Mock, call
+from parameterized import parameterized
from signedjson.key import generate_signing_key
from synapse.api.constants import EventTypes, Membership, PresenceState
@@ -37,6 +38,7 @@ from synapse.rest.client import room
from synapse.types import UserID, get_domain_from_id
from tests import unittest
+from tests.replication._base import BaseMultiWorkerStreamTestCase
class PresenceUpdateTestCase(unittest.HomeserverTestCase):
@@ -505,7 +507,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
self.assertEqual(state, new_state)
-class PresenceHandlerTestCase(unittest.HomeserverTestCase):
+class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
def prepare(self, reactor, clock, hs):
self.presence_handler = hs.get_presence_handler()
self.clock = hs.get_clock()
@@ -716,20 +718,47 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase):
# our status message should be the same as it was before
self.assertEqual(state.status_msg, status_msg)
- def test_set_presence_from_syncing_keeps_busy(self):
- """Test that presence set by syncing doesn't affect busy status"""
- # while this isn't the default
- self.presence_handler._busy_presence_enabled = True
+ @parameterized.expand([(False,), (True,)])
+ @unittest.override_config(
+ {
+ "experimental_features": {
+ "msc3026_enabled": True,
+ },
+ }
+ )
+ def test_set_presence_from_syncing_keeps_busy(self, test_with_workers: bool):
+ """Test that presence set by syncing doesn't affect busy status
+ Args:
+ test_with_workers: If True, check the presence state of the user by calling
+ /sync against a worker, rather than the main process.
+ """
user_id = "@test:server"
status_msg = "I'm busy!"
+ # By default, we call /sync against the main process.
+ worker_to_sync_against = self.hs
+ if test_with_workers:
+ # Create a worker and use it to handle /sync traffic instead.
+ # This is used to test that presence changes get replicated from workers
+ # to the main process correctly.
+ worker_to_sync_against = self.make_worker_hs(
+ "synapse.app.generic_worker", {"worker_name": "presence_writer"}
+ )
+
+ # Set presence to BUSY
self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg)
+ # Perform a sync with a presence state other than busy. This should NOT change
+ # our presence status; we only change from busy if we explicitly set it via
+ # /presence/*.
self.get_success(
- self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
+ worker_to_sync_against.get_presence_handler().user_syncing(
+ user_id, True, PresenceState.ONLINE
+ )
)
+ # Check against the main process that the user's presence did not change.
state = self.get_success(
self.presence_handler.get_state(UserID.from_string(user_id))
)
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index f88c725a42..675aa023ac 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -14,6 +14,8 @@
from typing import Any, Awaitable, Callable, Dict
from unittest.mock import Mock
+from parameterized import parameterized
+
from twisted.test.proto_helpers import MemoryReactor
import synapse.types
@@ -327,6 +329,53 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertFalse(res)
+ @unittest.override_config(
+ {"server_name": "test:8888", "allowed_avatar_mimetypes": ["image/png"]}
+ )
+ def test_avatar_constraint_on_local_server_with_port(self):
+ """Test that avatar metadata is correctly fetched when the media is on a local
+ server and the server has an explicit port.
+
+ (This was previously a bug)
+ """
+ local_server_name = self.hs.config.server.server_name
+ media_id = "local"
+ local_mxc = f"mxc://{local_server_name}/{media_id}"
+
+ # mock up the existence of the avatar file
+ self._setup_local_files({media_id: {"mimetype": "image/png"}})
+
+ # and now check that check_avatar_size_and_mime_type is happy
+ self.assertTrue(
+ self.get_success(self.handler.check_avatar_size_and_mime_type(local_mxc))
+ )
+
+ @parameterized.expand([("remote",), ("remote:1234",)])
+ @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
+ def test_check_avatar_on_remote_server(self, remote_server_name: str) -> None:
+ """Test that avatar metadata is correctly fetched from a remote server"""
+ media_id = "remote"
+ remote_mxc = f"mxc://{remote_server_name}/{media_id}"
+
+ # if the media is remote, check_avatar_size_and_mime_type just checks the
+ # media cache, so we don't need to instantiate a real remote server. It is
+ # sufficient to poke an entry into the db.
+ self.get_success(
+ self.hs.get_datastores().main.store_cached_remote_media(
+ media_id=media_id,
+ media_type="image/png",
+ media_length=50,
+ origin=remote_server_name,
+ time_now_ms=self.clock.time_msec(),
+ upload_name=None,
+ filesystem_id="xyz",
+ )
+ )
+
+ self.assertTrue(
+ self.get_success(self.handler.check_avatar_size_and_mime_type(remote_mxc))
+ )
+
def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]):
"""Stores metadata about files in the database.
diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py
index a95868b5c0..b55238650c 100644
--- a/tests/handlers/test_receipts.py
+++ b/tests/handlers/test_receipts.py
@@ -25,7 +25,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.event_source = hs.get_event_sources().sources.receipt
- def test_filters_out_private_receipt(self):
+ def test_filters_out_private_receipt(self) -> None:
self._test_filters_private(
[
{
@@ -45,7 +45,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
[],
)
- def test_filters_out_private_receipt_and_ignores_rest(self):
+ def test_filters_out_private_receipt_and_ignores_rest(self) -> None:
self._test_filters_private(
[
{
@@ -84,7 +84,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest(self):
+ def test_filters_out_event_with_only_private_receipts_and_ignores_the_rest(
+ self,
+ ) -> None:
self._test_filters_private(
[
{
@@ -125,7 +127,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_handles_empty_event(self):
+ def test_handles_empty_event(self) -> None:
self._test_filters_private(
[
{
@@ -160,7 +162,9 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest(self):
+ def test_filters_out_receipt_event_with_only_private_receipt_and_ignores_rest(
+ self,
+ ) -> None:
self._test_filters_private(
[
{
@@ -207,7 +211,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_handles_string_data(self):
+ def test_handles_string_data(self) -> None:
"""
Tests that an invalid shape for read-receipts is handled.
Context: https://github.com/matrix-org/synapse/issues/10603
@@ -242,7 +246,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_leaves_our_private_and_their_public(self):
+ def test_leaves_our_private_and_their_public(self) -> None:
self._test_filters_private(
[
{
@@ -296,7 +300,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
],
)
- def test_we_do_not_mutate(self):
+ def test_we_do_not_mutate(self) -> None:
"""Ensure the input values are not modified."""
events = [
{
@@ -320,7 +324,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase):
def _test_filters_private(
self, events: List[JsonDict], expected_output: List[JsonDict]
- ):
+ ) -> None:
"""Tests that the _filter_out_private returns the expected output"""
filtered_events = self.event_source.filter_out_private_receipts(
events, "@me:server.org"
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 23f35d5bf5..765df75d91 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -22,7 +22,6 @@ from synapse.api.errors import (
ResourceLimitError,
SynapseError,
)
-from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, RoomID, UserID, create_requester
@@ -144,12 +143,6 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
config=hs_config, federation_client=self.mock_federation_client
)
- load_legacy_spam_checkers(hs)
-
- module_api = hs.get_module_api()
- for module, config in hs.config.modules.loaded_modules:
- module(config=config, api=module_api)
-
return hs
def prepare(self, reactor, clock, hs):
@@ -504,7 +497,9 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
)
)
self.get_success(
- event_creation_handler.handle_new_client_event(requester, event, context)
+ event_creation_handler.handle_new_client_event(
+ requester, events_and_context=[(event, context)]
+ )
)
# Register a second user, which won't be be in the room (or even have an invite)
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 254e7e4b80..6bbfd5dc84 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -1,4 +1,3 @@
-from http import HTTPStatus
from unittest.mock import Mock, patch
from twisted.test.proto_helpers import MemoryReactor
@@ -7,7 +6,7 @@ import synapse.rest.admin
import synapse.rest.client.login
import synapse.rest.client.room
from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import LimitExceededError
+from synapse.api.errors import LimitExceededError, SynapseError
from synapse.crypto.event_signing import add_hashes_and_signatures
from synapse.events import FrozenEventV3
from synapse.federation.federation_client import SendJoinResult
@@ -15,10 +14,14 @@ from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.util import Clock
-from tests.replication._base import RedisMultiWorkerStreamTestCase
+from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import make_request
from tests.test_utils import make_awaitable
-from tests.unittest import FederatingHomeserverTestCase, override_config
+from tests.unittest import (
+ FederatingHomeserverTestCase,
+ HomeserverTestCase,
+ override_config,
+)
class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
@@ -217,7 +220,7 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
# - trying to remote-join again.
-class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestCase):
+class TestReplicatedJoinsLimitedByPerRoomRateLimiter(BaseMultiWorkerStreamTestCase):
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.client.login.register_servlets,
@@ -260,7 +263,7 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestC
f"/_matrix/client/v3/rooms/{self.room_id}/join",
access_token=self.bob_token,
)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+ self.assertEqual(channel.code, 200, channel.json_body)
# wait for join to arrive over replication
self.replicate()
@@ -288,3 +291,88 @@ class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestC
),
LimitExceededError,
)
+
+
+class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ synapse.rest.client.login.register_servlets,
+ synapse.rest.client.room.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.handler = hs.get_room_member_handler()
+ self.store = hs.get_datastores().main
+
+ # Create two users.
+ self.alice = self.register_user("alice", "pass")
+ self.alice_ID = UserID.from_string(self.alice)
+ self.alice_token = self.login("alice", "pass")
+ self.bob = self.register_user("bob", "pass")
+ self.bob_ID = UserID.from_string(self.bob)
+ self.bob_token = self.login("bob", "pass")
+
+ # Create a room on this homeserver.
+ self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token)
+
+ def test_leave_and_forget(self) -> None:
+ """Tests that forget a room is successfully. The test is performed with two users,
+ as forgetting by the last user respectively after all users had left the
+ is a special edge case."""
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+ # alice is not the last room member that leaves and forgets the room
+ self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+ self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+ self.assertTrue(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
+ # the server has not forgotten the room
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+ )
+
+ def test_leave_and_forget_last_user(self) -> None:
+ """Tests that forget a room is successfully when the last user has left the room."""
+
+ # alice is the last room member that leaves and forgets the room
+ self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+ self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+ self.assertTrue(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
+ # the server has forgotten the room
+ self.assertTrue(
+ self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+ )
+
+ def test_forget_when_not_left(self) -> None:
+ """Tests that a user cannot not forgets a room that has not left."""
+ self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError)
+
+ def test_rejoin_forgotten_by_user(self) -> None:
+ """Test that a user that has forgotten a room can do a re-join.
+ The room was not forgotten from the local server.
+ One local user is still member of the room."""
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+ self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+ self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+ self.assertTrue(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
+ # the server has not forgotten the room
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+ )
+
+ self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
+ # TODO: A join to a room does not invalidate the forgotten cache
+ # see https://github.com/matrix-org/synapse/issues/13262
+ self.store.did_forget.invalidate_all()
+ self.assertFalse(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py
new file mode 100644
index 0000000000..137deab138
--- /dev/null
+++ b/tests/handlers/test_sso.py
@@ -0,0 +1,145 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from http import HTTPStatus
+from typing import BinaryIO, Callable, Dict, List, Optional, Tuple
+from unittest.mock import Mock
+
+from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.http_headers import Headers
+
+from synapse.api.errors import Codes, SynapseError
+from synapse.http.client import RawHeaders
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.test_utils import SMALL_PNG, FakeResponse
+
+
+class TestSSOHandler(unittest.HomeserverTestCase):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.http_client = Mock(spec=["get_file"])
+ self.http_client.get_file.side_effect = mock_get_file
+ self.http_client.user_agent = b"Synapse Test"
+ hs = self.setup_test_homeserver(
+ proxied_blacklisted_http_client=self.http_client
+ )
+ return hs
+
+ async def test_set_avatar(self) -> None:
+ """Tests successfully setting the avatar of a newly created user"""
+ handler = self.hs.get_sso_handler()
+
+ # Create a new user to set avatar for
+ reg_handler = self.hs.get_registration_handler()
+ user_id = self.get_success(reg_handler.register_user(approved=True))
+
+ self.assertTrue(
+ self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
+ )
+
+ # Ensure an avatar was set for the newly created user;
+ # there's no need to compare against the exact image
+ profile_handler = self.hs.get_profile_handler()
+ profile = self.get_success(profile_handler.get_profile(user_id))
+ self.assertIsNot(profile["avatar_url"], None)
+
+ @unittest.override_config({"max_avatar_size": 1})
+ async def test_set_avatar_too_big_image(self) -> None:
+ """Tests that saving an avatar fails when it is too big"""
+ handler = self.hs.get_sso_handler()
+
+ # any random user works since image check is supposed to fail
+ user_id = "@sso-user:test"
+
+ self.assertFalse(
+ self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
+ )
+
+ @unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]})
+ async def test_set_avatar_incorrect_mime_type(self) -> None:
+ """Tests that saving an avatar fails when its mime type is not allowed"""
+ handler = self.hs.get_sso_handler()
+
+ # any random user works since image check is supposed to fail
+ user_id = "@sso-user:test"
+
+ self.assertFalse(
+ self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
+ )
+
+ async def test_skip_saving_avatar_when_not_changed(self) -> None:
+ """Tests whether saving of avatar correctly skips if the avatar hasn't
+ changed"""
+ handler = self.hs.get_sso_handler()
+
+ # Create a new user to set avatar for
+ reg_handler = self.hs.get_registration_handler()
+ user_id = self.get_success(reg_handler.register_user(approved=True))
+
+ # set avatar for the first time, should be a success
+ self.assertTrue(
+ self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
+ )
+
+ # get avatar picture for comparison after another attempt
+ profile_handler = self.hs.get_profile_handler()
+ profile = self.get_success(profile_handler.get_profile(user_id))
+ url_to_match = profile["avatar_url"]
+
+ # set same avatar for the second time, should be a success
+ self.assertTrue(
+ self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
+ )
+
+ # compare avatar picture's url from previous step
+ profile = self.get_success(profile_handler.get_profile(user_id))
+ self.assertEqual(profile["avatar_url"], url_to_match)
+
+
+async def mock_get_file(
+ url: str,
+ output_stream: BinaryIO,
+ max_size: Optional[int] = None,
+ headers: Optional[RawHeaders] = None,
+ is_allowed_content_type: Optional[Callable[[str], bool]] = None,
+) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
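+ """Stub for the mocked HTTP client's `get_file`: serves SMALL_PNG for the
+ test avatar URL (404 otherwise), raises SynapseError if the size or
+ content-type checks fail, and returns the (length, headers, uri,
+ response_code) tuple declared in the signature."""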
+
+ fake_response = FakeResponse(code=404)
+ if url == "http://my.server/me.png":
+ fake_response = FakeResponse(
+ code=200,
+ headers=Headers(
+ {"Content-Type": ["image/png"], "Content-Length": [str(len(SMALL_PNG))]}
+ ),
+ body=SMALL_PNG,
+ )
+
+ if max_size is not None and max_size < len(SMALL_PNG):
+ raise SynapseError(
+ HTTPStatus.BAD_GATEWAY,
+ "Requested file is too large > %r bytes" % (max_size,),
+ Codes.TOO_LARGE,
+ )
+
+ if is_allowed_content_type and not is_allowed_content_type("image/png"):
+ raise SynapseError(
+ HTTPStatus.BAD_GATEWAY,
+ (
+ "Requested file's content type not allowed for this operation: %s"
+ % "image/png"
+ ),
+ )
+
+ output_stream.write(fake_response.body)
+
+ return len(SMALL_PNG), {b"Content-Type": [b"image/png"]}, "", 200
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index e3f38fbcc5..ab5c101eb7 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -159,6 +159,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Blow away caches (supported room versions can only change due to a restart).
self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
+ self.store.get_rooms_for_user.invalidate_all()
self.get_success(self.store._get_event_cache.clear())
self.store._event_ref.clear()
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 7af1333126..9c821b3042 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -25,7 +25,7 @@ from synapse.api.constants import EduTypes
from synapse.api.errors import AuthError
from synapse.federation.transport.server import TransportLayerServer
from synapse.server import HomeServer
-from synapse.types import JsonDict, UserID, create_requester
+from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util import Clock
from tests import unittest
@@ -117,8 +117,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.room_members = []
- async def check_user_in_room(room_id: str, user_id: str) -> None:
- if user_id not in [u.to_string() for u in self.room_members]:
+ async def check_user_in_room(room_id: str, requester: Requester) -> None:
+ if requester.user.to_string() not in [
+ u.to_string() for u in self.room_members
+ ]:
raise AuthError(401, "User is not in the room")
return None
@@ -127,7 +129,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
async def check_host_in_room(room_id: str, server_name: str) -> bool:
return room_id == ROOM_ID
- hs.get_event_auth_handler().check_host_in_room = check_host_in_room
+ hs.get_event_auth_handler().is_host_in_room = check_host_in_room
async def get_current_hosts_in_room(room_id: str):
return {member.domain for member in self.room_members}
@@ -136,6 +138,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
get_current_hosts_in_room
)
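+ # Stub the partial-state approximation lookup with the same host set derived
+ # from the configured room members.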
+ hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = (
+ get_current_hosts_in_room
+ )
+
async def get_users_in_room(room_id: str):
return {str(u) for u in self.room_members}
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index e74f7f5b48..093537adef 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -13,6 +13,7 @@
# limitations under the License.
import os.path
import subprocess
+from typing import List
from zope.interface import implementer
@@ -70,14 +71,14 @@ subjectAltName = %(sanentries)s
"""
-def create_test_cert_file(sanlist):
+def create_test_cert_file(sanlist: List[bytes]) -> str:
"""build an x509 certificate file
Args:
- sanlist: list[bytes]: a list of subjectAltName values for the cert
+ sanlist: a list of subjectAltName values for the cert
Returns:
- str: the path to the file
+ The path to the file
"""
global cert_file_count
csr_filename = "server.csr"
diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py
index 994d8880b0..5071f83574 100644
--- a/tests/http/server/_base.py
+++ b/tests/http/server/_base.py
@@ -15,7 +15,6 @@
import inspect
import itertools
import logging
-from http import HTTPStatus
from typing import (
Any,
Callable,
@@ -78,7 +77,7 @@ def test_disconnect(
if expect_cancellation:
expected_code = HTTP_STATUS_REQUEST_CANCELLED
else:
- expected_code = HTTPStatus.OK
+ expected_code = 200
request = channel.request
if channel.is_finished():
@@ -141,6 +140,8 @@ def make_request_with_cancellation_test(
method: str,
path: str,
content: Union[bytes, str, JsonDict] = b"",
+ *,
+ token: Optional[str] = None,
) -> FakeChannel:
"""Performs a request repeatedly, disconnecting at successive `await`s, until
one completes.
@@ -212,7 +213,13 @@ def make_request_with_cancellation_test(
with deferred_patch.patch():
# Start the request.
channel = make_request(
- reactor, site, method, path, content, await_result=False
+ reactor,
+ site,
+ method,
+ path,
+ content,
+ await_result=False,
+ access_token=token,
)
request = channel.request
diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py
index c8cc21cadd..a801f002a0 100644
--- a/tests/http/test_endpoint.py
+++ b/tests/http/test_endpoint.py
@@ -25,6 +25,8 @@ class ServerNameTestCase(unittest.TestCase):
"[0abc:1def::1234]": ("[0abc:1def::1234]", None),
"1.2.3.4:1": ("1.2.3.4", 1),
"[0abc:1def::1234]:8080": ("[0abc:1def::1234]", 8080),
+ ":80": ("", 80),
+ "": ("", None),
}
for i, o in test_data.items():
@@ -42,6 +44,7 @@ class ServerNameTestCase(unittest.TestCase):
"newline.com\n",
".empty-label.com",
"1234:5678:80", # too many colons
+ ":80",
]
for i in test_data:
try:
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py
index bb966c80c6..46166292fe 100644
--- a/tests/http/test_servlet.py
+++ b/tests/http/test_servlet.py
@@ -18,7 +18,6 @@ from typing import Tuple
from unittest.mock import Mock
from synapse.api.errors import Codes, SynapseError
-from synapse.http.server import cancellable
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
@@ -28,6 +27,7 @@ from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.server import HomeServer
from synapse.types import JsonDict
+from synapse.util.cancellation import cancellable
from tests import unittest
from tests.http.server._base import test_disconnect
@@ -35,11 +35,13 @@ from tests.http.server._base import test_disconnect
def make_request(content):
"""Make an object that acts enough like a request."""
- request = Mock(spec=["content"])
+ request = Mock(spec=["method", "uri", "content"])
if isinstance(content, dict):
content = json.dumps(content).encode("utf8")
+ request.method = bytes("STUB_METHOD", "ascii")
+ request.uri = bytes("/test_stub_uri", "ascii")
request.content = BytesIO(content)
return request
diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py
index 3b14c76d7e..0917e478a5 100644
--- a/tests/logging/test_opentracing.py
+++ b/tests/logging/test_opentracing.py
@@ -25,6 +25,8 @@ from synapse.logging.context import (
from synapse.logging.opentracing import (
start_active_span,
start_active_span_follows_from,
+ tag_args,
+ trace_with_opname,
)
from synapse.util import Clock
@@ -38,8 +40,12 @@ try:
except ImportError:
jaeger_client = None # type: ignore
+import logging
+
from tests.unittest import TestCase
+logger = logging.getLogger(__name__)
+
class LogContextScopeManagerTestCase(TestCase):
"""
@@ -194,3 +200,80 @@ class LogContextScopeManagerTestCase(TestCase):
self._reporter.get_spans(),
[scopes[1].span, scopes[2].span, scopes[0].span],
)
+
+ def test_trace_decorator_sync(self) -> None:
+ """
+ Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
+ with sync functions
+ """
+ with LoggingContext("root context"):
+
+ @trace_with_opname("fixture_sync_func", tracer=self._tracer)
+ @tag_args
+ def fixture_sync_func() -> str:
+ return "foo"
+
+ result = fixture_sync_func()
+ self.assertEqual(result, "foo")
+
+ # the span should have been reported
+ self.assertEqual(
+ [span.operation_name for span in self._reporter.get_spans()],
+ ["fixture_sync_func"],
+ )
+
+ def test_trace_decorator_deferred(self) -> None:
+ """
+ Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
+ with functions that return deferreds
+ """
+ reactor = MemoryReactorClock()
+
+ with LoggingContext("root context"):
+
+ @trace_with_opname("fixture_deferred_func", tracer=self._tracer)
+ @tag_args
+ def fixture_deferred_func() -> "defer.Deferred[str]":
+ d1: defer.Deferred[str] = defer.Deferred()
+ d1.callback("foo")
+ return d1
+
+ result_d1 = fixture_deferred_func()
+
+ # let the tasks complete
+ reactor.pump((2,) * 8)
+
+ self.assertEqual(self.successResultOf(result_d1), "foo")
+
+ # the span should have been reported
+ self.assertEqual(
+ [span.operation_name for span in self._reporter.get_spans()],
+ ["fixture_deferred_func"],
+ )
+
+ def test_trace_decorator_async(self) -> None:
+ """
+ Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
+ with async functions
+ """
+ reactor = MemoryReactorClock()
+
+ with LoggingContext("root context"):
+
+ @trace_with_opname("fixture_async_func", tracer=self._tracer)
+ @tag_args
+ async def fixture_async_func() -> str:
+ return "foo"
+
+ d1 = defer.ensureDeferred(fixture_async_func())
+
+ # let the tasks complete
+ reactor.pump((2,) * 8)
+
+ self.assertEqual(self.successResultOf(d1), "foo")
+
+ # the span should have been reported
+ self.assertEqual(
+ [span.operation_name for span in self._reporter.get_spans()],
+ ["fixture_async_func"],
+ )
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index 96f399b7ab..0b0d8737c1 100644
--- a/tests/logging/test_terse_json.py
+++ b/tests/logging/test_terse_json.py
@@ -153,6 +153,7 @@ class TerseJsonTestCase(LoggerCleanupMixin, TestCase):
site.site_tag = "test-site"
site.server_version_string = "Server v1"
site.reactor = Mock()
+ site.experimental_cors_msc3886 = False
request = SynapseRequest(FakeChannel(site, None), site)
# Call requestReceived to finish instantiating the object.
request.content = BytesIO()
diff --git a/tests/metrics/__init__.py b/tests/metrics/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/metrics/__init__.py
diff --git a/tests/metrics/test_background_process_metrics.py b/tests/metrics/test_background_process_metrics.py
new file mode 100644
index 0000000000..f0f6cb2912
--- /dev/null
+++ b/tests/metrics/test_background_process_metrics.py
@@ -0,0 +1,19 @@
+from unittest import TestCase as StdlibTestCase
+from unittest.mock import Mock
+
+from synapse.logging.context import ContextResourceUsage, LoggingContext
+from synapse.metrics.background_process_metrics import _BackgroundProcess
+
+
+class TestBackgroundProcessMetrics(StdlibTestCase):
+ def test_update_metrics_with_negative_time_diff(self) -> None:
+ """We should ignore negative reported utime and stime differences"""
+ usage = ContextResourceUsage()
+ usage.ru_stime = usage.ru_utime = -1.0
+
+ mock_logging_context = Mock(spec=LoggingContext)
+ mock_logging_context.get_resource_usage.return_value = usage
+
+ process = _BackgroundProcess("test process", mock_logging_context)
+ # Should not raise
+ process.update_metrics()
diff --git a/tests/test_metrics.py b/tests/metrics/test_metrics.py
index b4574b2ffe..bddc4228bc 100644
--- a/tests/test_metrics.py
+++ b/tests/metrics/test_metrics.py
@@ -12,7 +12,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing_extensions import Protocol
+try:
+ from importlib import metadata
+except ImportError:
+ import importlib_metadata as metadata # type: ignore[no-redef]
+
+from unittest.mock import patch
+
+from pkg_resources import parse_version
+
+from synapse.app._base import _set_prometheus_client_use_created_metrics
from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
from synapse.util.caches.deferred_cache import DeferredCache
@@ -43,7 +54,11 @@ def get_sample_labels_value(sample):
class TestMauLimit(unittest.TestCase):
def test_basic(self):
- gauge = InFlightGauge(
+ class MetricEntry(Protocol):
+ foo: int
+ bar: int
+
+ gauge: InFlightGauge[MetricEntry] = InFlightGauge(
"test1", "", labels=["test_label"], sub_metrics=["foo", "bar"]
)
@@ -137,7 +152,7 @@ class CacheMetricsTests(unittest.HomeserverTestCase):
Caches produce metrics reflecting their state when scraped.
"""
CACHE_NAME = "cache_metrics_test_fgjkbdfg"
- cache = DeferredCache(CACHE_NAME, max_entries=777)
+ cache: DeferredCache[str, str] = DeferredCache(CACHE_NAME, max_entries=777)
items = {
x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii")
@@ -162,3 +177,30 @@ class CacheMetricsTests(unittest.HomeserverTestCase):
self.assertEqual(items["synapse_util_caches_cache_size"], "1.0")
self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0")
+
+
+class PrometheusMetricsHackTestCase(unittest.HomeserverTestCase):
+ if parse_version(metadata.version("prometheus_client")) < parse_version("0.14.0"):
+ skip = "prometheus-client too old"
+
+ def test_created_metrics_disabled(self) -> None:
+ """
+ Tests that a brittle hack, to disable `_created` metrics, works.
+ This involves poking at the internals of prometheus-client.
+ It's not the end of the world if this doesn't work.
+
+ This test gives us a way to notice if prometheus-client changes
+ their internals.
+ """
+ import prometheus_client.metrics
+
+ PRIVATE_FLAG_NAME = "_use_created"
+
+ # By default, the pesky `_created` metrics are enabled.
+ # Check this assumption is still valid.
+ self.assertTrue(getattr(prometheus_client.metrics, PRIVATE_FLAG_NAME))
+
+ with patch("prometheus_client.metrics") as mock:
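+ # Patch in a stand-in module so the real flag is left untouched: pre-set
+ # the private flag to True, run Synapse's helper, and check that it was
+ # flipped to False.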
+ setattr(mock, PRIVATE_FLAG_NAME, True)
+ _set_prometheus_client_use_created_metrics(False)
+ self.assertFalse(getattr(mock, PRIVATE_FLAG_NAME, False))
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 8e05590230..058ca57e55 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -16,6 +16,7 @@ from unittest.mock import Mock
from twisted.internet import defer
from synapse.api.constants import EduTypes, EventTypes
+from synapse.api.errors import NotFoundError
from synapse.events import EventBase
from synapse.federation.units import Transaction
from synapse.handlers.presence import UserPresenceState
@@ -29,7 +30,6 @@ from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.test_utils import simple_async_mock
from tests.test_utils.event_injection import inject_member_event
from tests.unittest import HomeserverTestCase, override_config
-from tests.utils import USE_POSTGRES_FOR_TESTS
class ModuleApiTestCase(HomeserverTestCase):
@@ -532,6 +532,34 @@ class ModuleApiTestCase(HomeserverTestCase):
self.assertEqual(res["displayname"], "simone")
self.assertIsNone(res["avatar_url"])
+ def test_update_room_membership_remote_join(self):
+ """Test that the module API can join a remote room."""
+ # Necessary to fake a remote join.
+ fake_stream_id = 1
+ mocked_remote_join = simple_async_mock(
+ return_value=("fake-event-id", fake_stream_id)
+ )
+ self.hs.get_room_member_handler()._remote_join = mocked_remote_join
+ fake_remote_host = f"{self.module_api.server_name}-remote"
+
+ # Given that the join is to be faked, we expect the relevant join event not to
+ # be persisted and the module API method to raise that.
+ self.get_failure(
+ defer.ensureDeferred(
+ self.module_api.update_room_membership(
+ sender=f"@user:{self.module_api.server_name}",
+ target=f"@user:{self.module_api.server_name}",
+ room_id=f"!nonexistent:{fake_remote_host}",
+ new_membership="join",
+ remote_room_hosts=[fake_remote_host],
+ )
+ ),
+ NotFoundError,
+ )
+
+ # Check that a remote join was attempted.
+ self.assertEqual(mocked_remote_join.call_count, 1)
+
def test_get_room_state(self):
"""Tests that a module can retrieve the state of a room through the module API."""
user_id = self.register_user("peter", "hackme")
@@ -654,15 +682,61 @@ class ModuleApiTestCase(HomeserverTestCase):
self.assertEqual(room_id, reference_room_id)
+ def test_create_room(self) -> None:
+ """Test that modules can create a room."""
+ # First test user validation (i.e. user is local).
+ self.get_failure(
+ self.module_api.create_room(
+ user_id=f"@user:{self.module_api.server_name}abc",
+ config={},
+ ratelimit=False,
+ ),
+ RuntimeError,
+ )
+
+ # Now do the happy path.
+ user_id = self.register_user("user", "password")
+ access_token = self.login(user_id, "password")
+
+ room_id, room_alias = self.get_success(
+ self.module_api.create_room(
+ user_id=user_id, config={"room_alias_name": "foo-bar"}, ratelimit=False
+ )
+ )
+
+ # Check room creator.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v3/rooms/{room_id}/state/m.room.create",
+ access_token=access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["creator"], user_id)
+
+ # Check room alias.
+ self.assertEquals(room_alias, f"#foo-bar:{self.module_api.server_name}")
+
+ # Let's try a room with no alias.
+ room_id, room_alias = self.get_success(
+ self.module_api.create_room(user_id=user_id, config={}, ratelimit=False)
+ )
+
+ # Check room creator.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v3/rooms/{room_id}/state/m.room.create",
+ access_token=access_token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["creator"], user_id)
+
+ # Check room alias.
+ self.assertIsNone(room_alias)
+
class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase):
"""For testing ModuleApi functionality in a multi-worker setup"""
- # Testing stream ID replication from the main to worker processes requires postgres
- # (due to needing `MultiWriterIdGenerator`).
- if not USE_POSTGRES_FOR_TESTS:
- skip = "Requires Postgres"
-
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -672,7 +746,6 @@ class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase):
def default_config(self):
conf = super().default_config()
- conf["redis"] = {"enabled": "true"}
conf["stream_writers"] = {"presence": ["presence_writer"]}
conf["instance_map"] = {
"presence_writer": {"host": "testserv", "port": 1001},
@@ -705,8 +778,11 @@ def _test_sending_local_online_presence_to_local_user(
worker process. The test users will still sync with the main process. The purpose of testing
with a worker is to check whether a Synapse module running on a worker can inform other workers/
the main process that they should include additional presence when a user next syncs.
+ If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase.
"""
if test_with_workers:
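+ # Narrow the type: `make_worker_hs` below only exists on
+ # BaseMultiWorkerStreamTestCase.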
+ assert isinstance(test_case, BaseMultiWorkerStreamTestCase)
+
# Create a worker process to make module_api calls against
worker_hs = test_case.make_worker_hs(
"synapse.app.generic_worker", {"worker_name": "presence_writer"}
diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
new file mode 100644
index 0000000000..594e7937a8
--- /dev/null
+++ b/tests/push/test_bulk_push_rule_evaluator.py
@@ -0,0 +1,74 @@
+from unittest.mock import patch
+
+from synapse.api.room_versions import RoomVersions
+from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
+from synapse.rest import admin
+from synapse.rest.client import login, register, room
+from synapse.types import create_requester
+
+from tests import unittest
+
+
+class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ register.register_servlets,
+ ]
+
+ def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None:
+ """We should convert floats and strings to integers before passing to Rust.
+
+ Reproduces #14060.
+
+ A lack of validation: the gift that keeps on giving.
+ """
+ # Create a new user and room.
+ alice = self.register_user("alice", "pass")
+ token = self.login(alice, "pass")
+
+ room_id = self.helper.create_room_as(
+ alice, room_version=RoomVersions.V9.identifier, tok=token
+ )
+
+ # Alter the power levels in that room to include stringy and floaty levels.
+ # We need to suppress the validation logic or else it will reject these dodgy
+ # values. (Presumably this validation was not always present.)
+ event_creation_handler = self.hs.get_event_creation_handler()
+ requester = create_requester(alice)
+ with patch("synapse.events.validator.validate_canonicaljson"), patch(
+ "synapse.events.validator.jsonschema.validate"
+ ):
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {
+ "users": {alice: "100"}, # stringy
+ "notifications": {"room": 100.0}, # float
+ },
+ token,
+ state_key="",
+ )
+
+ # Create a new message event, and try to evaluate it under the dodgy
+ # power level event.
+ event, context = self.get_success(
+ event_creation_handler.create_event(
+ requester,
+ {
+ "type": "m.room.message",
+ "room_id": room_id,
+ "content": {
+ "msgtype": "m.text",
+ "body": "helo",
+ },
+ "sender": alice,
+ },
+ )
+ )
+
+ bulk_evaluator = BulkPushRuleEvaluator(self.hs)
+ # should not raise
+ self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 7a3b0d6755..fd14568f55 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -114,7 +114,7 @@ class EmailPusherTests(HomeserverTestCase):
)
self.pusher = self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
access_token=self.token_id,
kind="email",
@@ -136,7 +136,7 @@ class EmailPusherTests(HomeserverTestCase):
"""
with self.assertRaises(SynapseError) as cm:
self.get_success_or_raise(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
access_token=self.token_id,
kind="email",
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index d9c68cdd2d..b383b8401f 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -19,9 +19,10 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.logging.context import make_deferred_yieldable
-from synapse.push import PusherConfigException
-from synapse.rest.client import login, push_rule, receipts, room
+from synapse.push import PusherConfig, PusherConfigException
+from synapse.rest.client import login, push_rule, pusher, receipts, room
from synapse.server import HomeServer
+from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import JsonDict
from synapse.util import Clock
@@ -35,6 +36,7 @@ class HTTPPusherTests(HomeserverTestCase):
login.register_servlets,
receipts.register_servlets,
push_rule.register_servlets,
+ pusher.register_servlets,
]
user_id = True
hijack_auth = False
@@ -74,7 +76,7 @@ class HTTPPusherTests(HomeserverTestCase):
def test_data(data: Optional[JsonDict]) -> None:
self.get_failure(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -119,7 +121,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -235,7 +237,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -355,7 +357,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -441,7 +443,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -518,7 +520,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -624,7 +626,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -728,18 +730,38 @@ class HTTPPusherTests(HomeserverTestCase):
)
self.assertEqual(channel.code, 200, channel.json_body)
- def _make_user_with_pusher(self, username: str) -> Tuple[str, str]:
+ def _make_user_with_pusher(
+ self, username: str, enabled: bool = True
+ ) -> Tuple[str, str]:
+ """Registers a user and creates a pusher for them.
+
+ Args:
+ username: the localpart of the new user's Matrix ID.
+ enabled: whether to create the pusher in an enabled or disabled state.
+ """
user_id = self.register_user(username, "pass")
access_token = self.login(username, "pass")
# Register the pusher
+ self._set_pusher(user_id, access_token, enabled)
+
+ return user_id, access_token
+
+ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None:
+ """Creates or updates the pusher for the given user.
+
+ Args:
+ user_id: the user's Matrix ID.
+ access_token: the access token associated with the pusher.
+ enabled: whether to enable or disable the pusher.
+ """
user_tuple = self.get_success(
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -749,11 +771,11 @@ class HTTPPusherTests(HomeserverTestCase):
pushkey="a@example.com",
lang=None,
data={"url": "http://example.com/_matrix/push/v1/notify"},
+ enabled=enabled,
+ device_id=user_tuple.device_id,
)
)
- return user_id, access_token
-
def test_dont_notify_rule_overrides_message(self) -> None:
"""
The override push rule will suppress notification
@@ -791,3 +813,148 @@ class HTTPPusherTests(HomeserverTestCase):
# The user sends a message back (sends a notification)
self.helper.send(room, body="Hello", tok=access_token)
self.assertEqual(len(self.push_attempts), 1)
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_disable(self) -> None:
+ """Tests that disabling a pusher means it's not pushed to anymore."""
+ user_id, access_token = self._make_user_with_pusher("user")
+ other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+ room = self.helper.create_room_as(user_id, tok=access_token)
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # Send a message and check that it generated a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Disable the pusher.
+ self._set_pusher(user_id, access_token, enabled=False)
+
+ # Send another message and check that it did not generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Get the pushers for the user and check that it is marked as disabled.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+ enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+ self.assertFalse(enabled)
+ self.assertTrue(isinstance(enabled, bool))
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_enable(self) -> None:
+ """Tests that enabling a disabled pusher means it gets pushed to."""
+ # Create the user with the pusher already disabled.
+ user_id, access_token = self._make_user_with_pusher("user", enabled=False)
+ other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+ room = self.helper.create_room_as(user_id, tok=access_token)
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # Send a message and check that it did not generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 0)
+
+ # Enable the pusher.
+ self._set_pusher(user_id, access_token, enabled=True)
+
+ # Send another message and check that it did generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Get the pushers for the user and check that it is marked as enabled.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+ enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+ self.assertTrue(enabled)
+ self.assertTrue(isinstance(enabled, bool))
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_null_enabled(self) -> None:
+ """Tests that a pusher that has an 'enabled' column set to NULL (eg pushers
+ created before the column was introduced) is considered enabled.
+ """
+ # We intentionally set 'enabled' to None so that it's stored as NULL in the
+ # database.
+ user_id, access_token = self._make_user_with_pusher("user", enabled=None) # type: ignore[arg-type]
+
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+ self.assertTrue(channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"])
+
+ def test_update_different_device_access_token_device_id(self) -> None:
+ """Tests that if we create a pusher from one device, the update it from another
+ device, the access token and device ID associated with the pusher stays the
+ same.
+ """
+ # Create a user with a pusher.
+ user_id, access_token = self._make_user_with_pusher("user")
+
+ # Get the token ID for the current access token, since that's what we store in
+ # the pushers table. Also get the device ID from it.
+ user_tuple = self.get_success(
+ self.hs.get_datastores().main.get_user_by_access_token(access_token)
+ )
+ token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
+
+ # Generate a new access token, and update the pusher with it.
+ new_token = self.login("user", "pass")
+ self._set_pusher(user_id, new_token, enabled=False)
+
+ # Get the current list of pushers for the user.
+ ret = self.get_success(
+ self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
+ )
+ pushers: List[PusherConfig] = list(ret)
+
+ # Check that we still have one pusher, and that the access token and device ID
+ # associated with it didn't change.
+ self.assertEqual(len(pushers), 1)
+ self.assertEqual(pushers[0].access_token, token_id)
+ self.assertEqual(pushers[0].device_id, device_id)
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_device_id(self) -> None:
+ """Tests that a pusher created with a given device ID shows that device ID in
+ GET /pushers requests.
+ """
+ self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # We create the pusher with an HTTP request rather than with
+ # _make_user_with_pusher so that we can test the device ID is correctly set when
+ # creating a pusher via an API call.
+ self.make_request(
+ method="POST",
+ path="/pushers/set",
+ content={
+ "kind": "http",
+ "app_id": "m.http",
+ "app_display_name": "HTTP Push Notifications",
+ "device_display_name": "pushy push",
+ "pushkey": "a@example.com",
+ "lang": "en",
+ "data": {"url": "http://example.com/_matrix/push/v1/notify"},
+ },
+ access_token=access_token,
+ )
+
+ # Look up the user info for the access token so we can compare the device ID.
+ lookup_result: TokenLookupResult = self.get_success(
+ self.hs.get_datastores().main.get_user_by_access_token(access_token)
+ )
+
+ # Get the user's pushers and check the correct device ID is reported.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+ self.assertEqual(
+ channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"],
+ lookup_result.device_id,
+ )
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 718f489577..fe7c145840 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -12,23 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Dict, Optional, Set, Tuple, Union
+from typing import Dict, Optional, Union
import frozendict
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.room_versions import RoomVersions
from synapse.appservice import ApplicationService
from synapse.events import FrozenEvent
-from synapse.push import push_rule_evaluator
-from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent
+from synapse.push.bulk_push_rule_evaluator import _flatten_dict
+from synapse.push.httppusher import tweaks_for_actions
+from synapse.rest import admin
from synapse.rest.client import login, register, room
from synapse.server import HomeServer
from synapse.storage.databases.main.appservice import _make_exclusive_regex
-from synapse.types import JsonDict
+from synapse.synapse_rust.push import PushRuleEvaluator
+from synapse.types import JsonDict, UserID
from synapse.util import Clock
from tests import unittest
@@ -37,11 +39,8 @@ from tests.test_utils.event_injection import create_event, inject_member_event
class PushRuleEvaluatorTestCase(unittest.TestCase):
def _get_evaluator(
- self,
- content: JsonDict,
- relations: Optional[Dict[str, Set[Tuple[str, str]]]] = None,
- relations_match_enabled: bool = False,
- ) -> PushRuleEvaluatorForEvent:
+ self, content: JsonDict, related_events: Optional[JsonDict] = None
+ ) -> PushRuleEvaluator:
event = FrozenEvent(
{
"event_id": "$event_id",
@@ -56,13 +55,13 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
room_member_count = 0
sender_power_level = 0
power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
- return PushRuleEvaluatorForEvent(
- event,
+ return PushRuleEvaluator(
+ _flatten_dict(event),
room_member_count,
sender_power_level,
- power_levels,
- relations or set(),
- relations_match_enabled,
+ power_levels.get("notifications", {}),
+ {} if related_events is None else related_events,
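+ # Enable related event matching, which the MSC3664 tests below rely on.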
+ True,
)
def test_display_name(self) -> None:
@@ -293,77 +292,218 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
]
self.assertEqual(
- push_rule_evaluator.tweaks_for_actions(actions),
+ tweaks_for_actions(actions),
{"sound": "default", "highlight": True},
)
- def test_relation_match(self) -> None:
- """Test the relation_match push rule kind."""
-
- # Check if the experimental feature is disabled.
+ def test_related_event_match(self) -> None:
evaluator = self._get_evaluator(
- {}, {"m.annotation": {("@user:test", "m.reaction")}}
+ {
+ "m.relates_to": {
+ "event_id": "$parent_event_id",
+ "key": "😀",
+ "rel_type": "m.annotation",
+ "m.in_reply_to": {
+ "event_id": "$parent_event_id",
+ },
+ }
+ },
+ {
+ "m.in_reply_to": {
+ "event_id": "$parent_event_id",
+ "type": "m.room.message",
+ "sender": "@other_user:test",
+ "room_id": "!room:test",
+ "content.msgtype": "m.text",
+ "content.body": "Original message",
+ },
+ "m.annotation": {
+ "event_id": "$parent_event_id",
+ "type": "m.room.message",
+ "sender": "@other_user:test",
+ "room_id": "!room:test",
+ "content.msgtype": "m.text",
+ "content.body": "Original message",
+ },
+ },
+ )
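+ # A rule on the reply relation should match the replied-to event's sender.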
+ self.assertTrue(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@other_user:test",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@user:test",
+ },
+ "@other_user:test",
+ "display_name",
+ )
+ )
+ self.assertTrue(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.annotation",
+ "pattern": "@other_user:test",
+ },
+ "@other_user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertTrue(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "rel_type": "m.in_reply_to",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "rel_type": "m.replace",
+ },
+ "@other_user:test",
+ "display_name",
+ )
)
- condition = {"kind": "relation_match"}
- # Oddly, an unknown condition always matches.
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
- # A push rule evaluator with the experimental rule enabled.
+ def test_related_event_match_with_fallback(self) -> None:
evaluator = self._get_evaluator(
- {}, {"m.annotation": {("@user:test", "m.reaction")}}, True
+ {
+ "m.relates_to": {
+ "event_id": "$parent_event_id",
+ "key": "😀",
+ "rel_type": "m.thread",
+ "is_falling_back": True,
+ "m.in_reply_to": {
+ "event_id": "$parent_event_id",
+ },
+ }
+ },
+ {
+ "m.in_reply_to": {
+ "event_id": "$parent_event_id",
+ "type": "m.room.message",
+ "sender": "@other_user:test",
+ "room_id": "!room:test",
+ "content.msgtype": "m.text",
+ "content.body": "Original message",
+ "im.vector.is_falling_back": "",
+ },
+ "m.thread": {
+ "event_id": "$parent_event_id",
+ "type": "m.room.message",
+ "sender": "@other_user:test",
+ "room_id": "!room:test",
+ "content.msgtype": "m.text",
+ "content.body": "Original message",
+ },
+ },
+ )
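+ # Thread fallback replies only match when the rule opts into fallbacks.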
+ self.assertTrue(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@other_user:test",
+ "include_fallbacks": True,
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@other_user:test",
+ "include_fallbacks": False,
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@other_user:test",
+ },
+ "@user:test",
+ "display_name",
+ )
)
- # Check just relation type.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- }
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
-
- # Check relation type and sender.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- "sender": "@user:test",
- }
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- "sender": "@other:test",
- }
- self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
-
- # Check relation type and event type.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- "type": "m.reaction",
- }
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
-
- # Check just sender, this fails since rel_type is required.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "sender": "@user:test",
- }
- self.assertFalse(evaluator.matches(condition, "@user:test", "foo"))
-
- # Check sender glob.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- "sender": "@*:test",
- }
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
-
- # Check event type glob.
- condition = {
- "kind": "org.matrix.msc3772.relation_match",
- "rel_type": "m.annotation",
- "event_type": "*.reaction",
- }
- self.assertTrue(evaluator.matches(condition, "@user:test", "foo"))
+ def test_related_event_match_no_related_event(self) -> None:
+ evaluator = self._get_evaluator(
+ {"msgtype": "m.text", "body": "Message without related event"}
+ )
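+ # With no related events supplied, none of the rule variants should match.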
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ "pattern": "@other_user:test",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "key": "sender",
+ "rel_type": "m.in_reply_to",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
+ self.assertFalse(
+ evaluator.matches(
+ {
+ "kind": "im.nheko.msc3664.related_event_match",
+ "rel_type": "m.in_reply_to",
+ },
+ "@user:test",
+ "display_name",
+ )
+ )
class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase):
@@ -439,3 +579,80 @@ class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase):
)
self.assertEqual(len(users_with_push_actions), 0)
+
+
+class BulkPushRuleEvaluatorTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.main_store = homeserver.get_datastores().main
+
+ self.user_id1 = self.register_user("user1", "password")
+ self.tok1 = self.login(self.user_id1, "password")
+ self.user_id2 = self.register_user("user2", "password")
+ self.tok2 = self.login(self.user_id2, "password")
+
+ self.room_id = self.helper.create_room_as(tok=self.tok1)
+
+ # We want to test that history visibility works correctly.
+ self.helper.send_state(
+ self.room_id,
+ EventTypes.RoomHistoryVisibility,
+ {"history_visibility": HistoryVisibility.JOINED},
+ tok=self.tok1,
+ )
+
+ def get_notif_count(self, user_id: str) -> int:
+ return self.get_success(
+ self.main_store.db_pool.simple_select_one_onecol(
+ table="event_push_actions",
+ keyvalues={"user_id": user_id},
+ retcol="COALESCE(SUM(notif), 0)",
+ desc="get_staging_notif_count",
+ )
+ )
+
+ def test_plain_message(self) -> None:
+ """Test that sending a normal message in a room will trigger a
+ notification
+ """
+
+ # Have user2 join the room.
+ self.helper.join(self.room_id, self.user_id2, tok=self.tok2)
+
+ # They start off with no notifications, but get them when messages are
+ # sent.
+ self.assertEqual(self.get_notif_count(self.user_id2), 0)
+
+ user1 = UserID.from_string(self.user_id1)
+ self.create_and_send_event(self.room_id, user1)
+
+ self.assertEqual(self.get_notif_count(self.user_id2), 1)
+
+ def test_delayed_message(self) -> None:
+ """Test that a delayed message that was from before a user joined
+ doesn't cause a notification for the joined user.
+ """
+ user1 = UserID.from_string(self.user_id1)
+
+ # Send a message before user2 joins
+ event_id1 = self.create_and_send_event(self.room_id, user1)
+
+ # Have user2 join the room
+ self.helper.join(self.room_id, self.user_id2, tok=self.tok2)
+
+ # They start off with no notifications
+ self.assertEqual(self.get_notif_count(self.user_id2), 0)
+
+ # Send another message that references the event before the join to
+ # simulate a "delayed" event
+ self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1])
+
+ # user2 should not be notified about it, because they can't see it.
+ self.assertEqual(self.get_notif_count(self.user_id2), 0)
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 970d5e533b..3029a16dda 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -24,11 +24,11 @@ from synapse.http.site import SynapseRequest, SynapseSite
from synapse.replication.http import ReplicationRestResource
from synapse.replication.tcp.client import ReplicationDataHandler
from synapse.replication.tcp.handler import ReplicationCommandHandler
-from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
-from synapse.replication.tcp.resource import (
- ReplicationStreamProtocolFactory,
+from synapse.replication.tcp.protocol import (
+ ClientReplicationStreamProtocol,
ServerReplicationStreamProtocol,
)
+from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.server import HomeServer
from tests import unittest
@@ -220,15 +220,34 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
"""Base class for tests running multiple workers.
+ Enables Redis, providing a fake Redis server.
+
Automatically handle HTTP replication requests from workers to master,
unlike `BaseStreamTestCase`.
"""
+ if not hiredis:
+ skip = "Requires hiredis"
+
+ if not USE_POSTGRES_FOR_TESTS:
+ # Redis replication only takes place on Postgres
+ skip = "Requires Postgres"
+
+ def default_config(self) -> Dict[str, Any]:
+ """
+ Overrides the default config to enable Redis.
+ Even if the test only uses make_worker_hs, the main process needs Redis
+ enabled, otherwise it won't create a fake Redis server to listen on the
+ Redis port and accept fake TCP connections.
+ """
+ base = super().default_config()
+ base["redis"] = {"enabled": True}
+ return base
+
def setUp(self):
super().setUp()
# build a replication server
- self.server_factory = ReplicationStreamProtocolFactory(self.hs)
self.streamer = self.hs.get_replication_streamer()
# Fake in memory Redis server that servers can connect to.
@@ -247,15 +266,14 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
# handling inbound HTTP requests to that instance.
self._hs_to_site = {self.hs: self.site}
- if self.hs.config.redis.redis_enabled:
- # Handle attempts to connect to fake redis server.
- self.reactor.add_tcp_client_callback(
- "localhost",
- 6379,
- self.connect_any_redis_attempts,
- )
+ # Handle attempts to connect to fake redis server.
+ self.reactor.add_tcp_client_callback(
+ "localhost",
+ 6379,
+ self.connect_any_redis_attempts,
+ )
- self.hs.get_replication_command_handler().start_replication(self.hs)
+ self.hs.get_replication_command_handler().start_replication(self.hs)
# When we see a connection attempt to the master replication listener we
# automatically set up the connection. This is so that tests don't
@@ -339,27 +357,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
store = worker_hs.get_datastores().main
store.db_pool._db_pool = self.database_pool._db_pool
- # Set up TCP replication between master and the new worker if we don't
- # have Redis support enabled.
- if not worker_hs.config.redis.redis_enabled:
- repl_handler = ReplicationCommandHandler(worker_hs)
- client = ClientReplicationStreamProtocol(
- worker_hs,
- "client",
- "test",
- self.clock,
- repl_handler,
- )
- server = self.server_factory.buildProtocol(
- IPv4Address("TCP", "127.0.0.1", 0)
- )
-
- client_transport = FakeTransport(server, self.reactor)
- client.makeConnection(client_transport)
-
- server_transport = FakeTransport(client, self.reactor)
- server.makeConnection(server_transport)
-
# Set up a resource for the worker
resource = ReplicationRestResource(worker_hs)
@@ -374,12 +371,11 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
config=worker_hs.config.server.listeners[0],
resource=resource,
server_version_string="1",
- max_request_body_size=4096,
+ max_request_body_size=8192,
reactor=self.reactor,
)
- if worker_hs.config.redis.redis_enabled:
- worker_hs.get_replication_command_handler().start_replication(worker_hs)
+ worker_hs.get_replication_command_handler().start_replication(worker_hs)
return worker_hs
@@ -546,8 +542,13 @@ class FakeRedisPubSubProtocol(Protocol):
self.send("OK")
elif command == b"GET":
self.send(None)
+
+ # Connection keep-alives.
+ elif command == b"PING":
+ self.send("PONG")
+
else:
- raise Exception("Unknown command")
+ raise Exception(f"Unknown command: {command}")
def send(self, msg):
"""Send a message back to the client."""
@@ -582,27 +583,3 @@ class FakeRedisPubSubProtocol(Protocol):
def connectionLost(self, reason):
self._server.remove_subscriber(self)
-
-
-class RedisMultiWorkerStreamTestCase(BaseMultiWorkerStreamTestCase):
- """
- A test case that enables Redis, providing a fake Redis server.
- """
-
- if not hiredis:
- skip = "Requires hiredis"
-
- if not USE_POSTGRES_FOR_TESTS:
- # Redis replication only takes place on Postgres
- skip = "Requires Postgres"
-
- def default_config(self) -> Dict[str, Any]:
- """
- Overrides the default config to enable Redis.
- Even if the test only uses make_worker_hs, the main process needs Redis
- enabled otherwise it won't create a Fake Redis server to listen on the
- Redis port and accept fake TCP connections.
- """
- base = super().default_config()
- base["redis"] = {"enabled": True}
- return base
diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py
index 822a957c3a..936ab4504a 100644
--- a/tests/replication/http/test__base.py
+++ b/tests/replication/http/test__base.py
@@ -18,11 +18,12 @@ from typing import Tuple
from twisted.web.server import Request
from synapse.api.errors import Codes
-from synapse.http.server import JsonResource, cancellable
+from synapse.http.server import JsonResource
from synapse.replication.http import REPLICATION_PREFIX
from synapse.replication.http._base import ReplicationEndpoint
from synapse.server import HomeServer
from synapse.types import JsonDict
+from synapse.util.cancellation import cancellable
from tests import unittest
from tests.http.server._base import test_disconnect
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 531a0db2d0..dce71f7334 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -21,8 +21,11 @@ from synapse.api.constants import ReceiptTypes
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict
from synapse.handlers.room import RoomEventSource
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.storage.databases.main.event_push_actions import (
+ NotifCounts,
+ RoomNotifCounts,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser
from synapse.types import PersistedEventPosition
@@ -55,9 +58,9 @@ def patch__eq__(cls):
return unpatch
-class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
+class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase):
- STORE_TYPE = SlavedEventStore
+ STORE_TYPE = EventsWorkerStore
def setUp(self):
# Patch up the equality operator for events so that we can check
@@ -140,6 +143,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
+ assert event.internal_metadata.stream_ordering is not None
self.replicate()
@@ -171,14 +175,16 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
if send_receipt:
self.get_success(
self.master_store.insert_receipt(
- ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], {}
+ ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], None, {}
)
)
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2],
- NotifCounts(highlight_count=0, unread_count=0, notify_count=0),
+ RoomNotifCounts(
+ NotifCounts(highlight_count=0, unread_count=0, notify_count=0), {}
+ ),
)
self.persist(
@@ -191,7 +197,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2],
- NotifCounts(highlight_count=0, unread_count=0, notify_count=1),
+ RoomNotifCounts(
+ NotifCounts(highlight_count=0, unread_count=0, notify_count=1), {}
+ ),
)
self.persist(
@@ -206,7 +214,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
self.check(
"get_unread_event_push_actions_by_room_for_user",
[ROOM_ID, USER_ID_2],
- NotifCounts(highlight_count=1, unread_count=0, notify_count=2),
+ RoomNotifCounts(
+ NotifCounts(highlight_count=1, unread_count=0, notify_count=2), {}
+ ),
)
def test_get_rooms_for_user_with_stream_ordering(self):
@@ -221,6 +231,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
j2 = self.persist(
type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
)
+ assert j2.internal_metadata.stream_ordering is not None
self.replicate()
expected_pos = PersistedEventPosition(
@@ -278,6 +289,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
)
)
self.replicate()
+ assert j2.internal_metadata.stream_ordering is not None
event_source = RoomEventSource(self.hs)
event_source.store = self.slaved_store
@@ -327,10 +339,10 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
event_id = 0
- def persist(self, backfill=False, **kwargs):
+ def persist(self, backfill=False, **kwargs) -> FrozenEvent:
"""
Returns:
- synapse.events.FrozenEvent: The event that was persisted.
+ The event that was persisted.
"""
event, context = self.build_event(**kwargs)
@@ -404,6 +416,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
event.event_id,
{user_id: actions for user_id, actions in push_actions},
False,
+ "main",
)
)
return event, context
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index eb00117845..ede6d0c118 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -33,7 +33,12 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
# tell the master to send a new receipt
self.get_success(
self.hs.get_datastores().main.insert_receipt(
- "!room:blue", "m.read", USER_ID, ["$event:blue"], {"a": 1}
+ "!room:blue",
+ "m.read",
+ USER_ID,
+ ["$event:blue"],
+ thread_id=None,
+ data={"a": 1},
)
)
self.replicate()
@@ -48,6 +53,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
self.assertEqual("m.read", row.receipt_type)
self.assertEqual(USER_ID, row.user_id)
self.assertEqual("$event:blue", row.event_id)
+ self.assertIsNone(row.thread_id)
self.assertEqual({"a": 1}, row.data)
# Now let's disconnect and insert some data.
@@ -57,7 +63,12 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
self.get_success(
self.hs.get_datastores().main.insert_receipt(
- "!room2:blue", "m.read", USER_ID, ["$event2:foo"], {"a": 2}
+ "!room2:blue",
+ "m.read",
+ USER_ID,
+ ["$event2:foo"],
+ thread_id=None,
+ data={"a": 2},
)
)
self.replicate()
diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py
index e6a19eafd5..1e299d2d67 100644
--- a/tests/replication/tcp/test_handler.py
+++ b/tests/replication/tcp/test_handler.py
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from tests.replication._base import RedisMultiWorkerStreamTestCase
+from tests.replication._base import BaseMultiWorkerStreamTestCase
-class ChannelsTestCase(RedisMultiWorkerStreamTestCase):
+class ChannelsTestCase(BaseMultiWorkerStreamTestCase):
def test_subscribed_to_enough_redis_channels(self) -> None:
# The default main process is subscribed to the USER_IP channel.
self.assertCountEqual(
diff --git a/tests/replication/test_module_cache_invalidation.py b/tests/replication/test_module_cache_invalidation.py
new file mode 100644
index 0000000000..b93cae67d3
--- /dev/null
+++ b/tests/replication/test_module_cache_invalidation.py
@@ -0,0 +1,79 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import synapse
+from synapse.module_api import cached
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+logger = logging.getLogger(__name__)
+
+FIRST_VALUE = "one"
+SECOND_VALUE = "two"
+
+KEY = "mykey"
+
+
+class TestCache:
+ current_value = FIRST_VALUE
+
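+ # @cached memoises per key: changes to current_value aren't visible until the cache is invalidated.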
+ @cached()
+ async def cached_function(self, user_id: str) -> str:
+ return self.current_value
+
+
+class ModuleCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ ]
+
+ def test_module_cache_full_invalidation(self) -> None:
+ main_cache = TestCache()
+ self.hs.get_module_api().register_cached_function(main_cache.cached_function)
+
+ worker_hs = self.make_worker_hs("synapse.app.generic_worker")
+
+ worker_cache = TestCache()
+ worker_hs.get_module_api().register_cached_function(
+ worker_cache.cached_function
+ )
+
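+ # Both caches start off warm with the initial value.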
+ self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+ self.assertEqual(
+ FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
+
+ main_cache.current_value = SECOND_VALUE
+ worker_cache.current_value = SECOND_VALUE
+ # No invalidation yet, should return the cached value on both the main process and the worker
+ self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+ self.assertEqual(
+ FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
+
+ # Full invalidation on the main process should be replicated to the worker,
+ # which should then return the updated value too.
+ self.get_success(
+ self.hs.get_module_api().invalidate_cache(
+ main_cache.cached_function, (KEY,)
+ )
+ )
+
+ self.assertEqual(
+ SECOND_VALUE, self.get_success(main_cache.cached_function(KEY))
+ )
+ self.assertEqual(
+ SECOND_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index 13aa5eb51a..96cdf2c45b 100644
--- a/tests/replication/test_multi_media_repo.py
+++ b/tests/replication/test_multi_media_repo.py
@@ -15,8 +15,9 @@ import logging
import os
from typing import Optional, Tuple
+from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
from twisted.internet.protocol import Factory
-from twisted.protocols.tls import TLSMemoryBIOFactory
+from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
from twisted.web.http import HTTPChannel
from twisted.web.server import Request
@@ -102,7 +103,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
)
# fish the test server back out of the server-side TLS protocol.
- http_server = server_tls_protocol.wrappedProtocol
+ http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment]
# give the reactor a pump to get the TLS juices flowing.
self.reactor.pump((0.1,))
@@ -238,16 +239,15 @@ def get_connection_factory():
return test_server_connection_factory
-def _build_test_server(connection_creator):
+def _build_test_server(
+ connection_creator: IOpenSSLServerConnectionCreator,
+) -> TLSMemoryBIOProtocol:
"""Construct a test server
This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol
Args:
- connection_creator (IOpenSSLServerConnectionCreator): thing to build
- SSL connections
- sanlist (list[bytes]): list of the SAN entries for the cert returned
- by the server
+ connection_creator: thing to build SSL connections
Returns:
TLSMemoryBIOProtocol
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 8f4f6688ce..59fea93e49 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -55,7 +55,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
token_id = user_dict.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index a7ca68069e..541d390286 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -20,7 +20,6 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import make_request
-from tests.utils import USE_POSTGRES_FOR_TESTS
logger = logging.getLogger(__name__)
@@ -28,11 +27,6 @@ logger = logging.getLogger(__name__)
class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
"""Checks event persisting sharding works"""
- # Event persister sharding requires postgres (due to needing
- # `MultiWriterIdGenerator`).
- if not USE_POSTGRES_FOR_TESTS:
- skip = "Requires Postgres"
-
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -50,7 +44,6 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
def default_config(self):
conf = super().default_config()
- conf["redis"] = {"enabled": "true"}
conf["stream_writers"] = {"events": ["worker1", "worker2"]}
conf["instance_map"] = {
"worker1": {"host": "testserv", "port": 1001},
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 82ac5991e6..a8f6436836 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -13,7 +13,6 @@
# limitations under the License.
import urllib.parse
-from http import HTTPStatus
from parameterized import parameterized
@@ -42,7 +41,7 @@ class VersionTestCase(unittest.HomeserverTestCase):
def test_version_string(self) -> None:
channel = self.make_request("GET", self.url, shorthand=False)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
{"server_version", "python_version"}, set(channel.json_body.keys())
)
@@ -79,10 +78,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Should be quarantined
self.assertEqual(
- HTTPStatus.NOT_FOUND,
+ 404,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.NOT_FOUND on accessing quarantined media: %s"
+ "Expected to receive a 404 on accessing quarantined media: %s"
% server_and_media_id
),
)
@@ -107,7 +106,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Expect a forbidden error
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg="Expected forbidden on quarantining media as a non-admin",
)
@@ -139,7 +138,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
)
# Should be successful
- self.assertEqual(HTTPStatus.OK, channel.code)
+ self.assertEqual(200, channel.code)
# Quarantine the media
url = "/_synapse/admin/v1/media/quarantine/%s/%s" % (
@@ -152,7 +151,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
access_token=admin_user_tok,
)
self.pump(1.0)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Attempt to access the media
self._ensure_quarantined(admin_user_tok, server_name_and_media_id)
@@ -209,7 +208,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
access_token=admin_user_tok,
)
self.pump(1.0)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items"
)
@@ -251,7 +250,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
access_token=admin_user_tok,
)
self.pump(1.0)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items"
)
@@ -285,7 +284,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v1/media/protect/%s" % (urllib.parse.quote(media_id_2),)
channel = self.make_request("POST", url, access_token=admin_user_tok)
self.pump(1.0)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Quarantine all media by this user
url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote(
@@ -297,7 +296,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
access_token=admin_user_tok,
)
self.pump(1.0)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body, {"num_quarantined": 1}, "Expected 1 quarantined item"
)
@@ -318,10 +317,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase):
# Shouldn't be quarantined
self.assertEqual(
- HTTPStatus.OK,
+ 200,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.OK on accessing not-quarantined media: %s"
+ "Expected to receive a 200 on accessing not-quarantined media: %s"
% server_and_media_id_2
),
)
@@ -350,7 +349,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
def test_purge_history(self) -> None:
"""
Simple test of purge history API.
- Test only that is is possible to call, get status HTTPStatus.OK and purge_id.
+ Test only that it is possible to call, get status 200 and a purge_id.
"""
channel = self.make_request(
@@ -360,7 +359,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("purge_id", channel.json_body)
purge_id = channel.json_body["purge_id"]
@@ -371,5 +370,5 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("complete", channel.json_body["status"])
diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py
index 6cf56b1e35..d507a3af8d 100644
--- a/tests/rest/admin/test_background_updates.py
+++ b/tests/rest/admin/test_background_updates.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from typing import Collection
from parameterized import parameterized
@@ -51,7 +50,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
)
def test_requester_is_no_admin(self, method: str, url: str) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, a 403 error is returned.
"""
self.register_user("user", "pass", admin=False)
@@ -64,7 +63,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
access_token=other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -81,7 +80,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
# job_name invalid
@@ -92,7 +91,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
def _register_bg_update(self) -> None:
@@ -125,7 +124,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/status",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Background updates should be enabled, but none should be running.
self.assertDictEqual(
@@ -147,7 +146,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/status",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Background updates should be enabled, and one should be running.
self.assertDictEqual(
@@ -181,7 +180,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/enabled",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertDictEqual(channel.json_body, {"enabled": True})
# Disable the BG updates
@@ -191,7 +190,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
content={"enabled": False},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertDictEqual(channel.json_body, {"enabled": False})
# Advance a bit and get the current status, note this will finish the in
@@ -204,7 +203,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/status",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertDictEqual(
channel.json_body,
{
@@ -231,7 +230,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/status",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# There should be no change from the previous /status response.
self.assertDictEqual(
@@ -259,7 +258,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
content={"enabled": True},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertDictEqual(channel.json_body, {"enabled": True})
@@ -270,7 +269,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
"/_synapse/admin/v1/background_updates/status",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Background updates should be enabled and making progress.
self.assertDictEqual(
@@ -325,7 +324,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# test that each background update is waiting now
for update in updates:
@@ -365,4 +364,4 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index f7080bda87..03f2112b07 100644
--- a/tests/rest/admin/test_device.py
+++ b/tests/rest/admin/test_device.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
-from http import HTTPStatus
from parameterized import parameterized
@@ -20,6 +19,7 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
+from synapse.handlers.device import DeviceHandler
from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.util import Clock
@@ -35,7 +35,9 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.handler = hs.get_device_handler()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.handler = handler
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
@@ -58,7 +60,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request(method, self.url, b"{}")
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -76,7 +78,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg=channel.json_body,
)
@@ -85,7 +87,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_user_does_not_exist(self, method: str) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
url = (
"/_synapse/admin/v2/users/@unknown_person:test/devices/%s"
@@ -98,13 +100,13 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_user_is_not_local(self, method: str) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = (
"/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices/%s"
@@ -117,12 +119,12 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_unknown_device(self) -> None:
"""
- Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or HTTPStatus.OK.
+ Tests that a lookup for a device that does not exist returns either 404 or 200.
"""
url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
self.other_user
@@ -134,7 +136,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
channel = self.make_request(
@@ -143,7 +145,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
channel = self.make_request(
"DELETE",
@@ -151,8 +153,8 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- # Delete unknown device returns status HTTPStatus.OK
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ # Delete unknown device returns status 200
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def test_update_device_too_long_display_name(self) -> None:
"""
@@ -179,7 +181,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
content=update,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])
# Ensure the display name was not updated.
@@ -189,12 +191,12 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("new display", channel.json_body["display_name"])
def test_update_no_display_name(self) -> None:
"""
- Tests that a update for a device without JSON returns a HTTPStatus.OK
+ Tests that an update for a device without JSON returns a 200
"""
# Set initial display name.
update = {"display_name": "new display"}
@@ -210,7 +212,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Ensure the display name was not updated.
channel = self.make_request(
@@ -219,7 +221,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("new display", channel.json_body["display_name"])
def test_update_display_name(self) -> None:
@@ -234,7 +236,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
content={"display_name": "new displayname"},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Check new display_name
channel = self.make_request(
@@ -243,7 +245,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("new displayname", channel.json_body["display_name"])
def test_get_device(self) -> None:
@@ -256,7 +258,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
# Check that all fields are available
self.assertIn("user_id", channel.json_body)
@@ -281,7 +283,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Ensure that the number of devices is decreased
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
@@ -312,7 +314,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -331,7 +333,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg=channel.json_body,
)
@@ -339,7 +341,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
def test_user_does_not_exist(self) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
channel = self.make_request(
@@ -348,12 +350,12 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices"
@@ -363,7 +365,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_user_has_no_devices(self) -> None:
@@ -379,7 +381,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["devices"]))
@@ -399,7 +401,7 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_devices, channel.json_body["total"])
self.assertEqual(number_devices, len(channel.json_body["devices"]))
self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"])
@@ -438,7 +440,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -457,7 +459,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg=channel.json_body,
)
@@ -465,7 +467,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
def test_user_does_not_exist(self) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v2/users/@unknown_person:test/delete_devices"
channel = self.make_request(
@@ -474,12 +476,12 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/delete_devices"
@@ -489,12 +491,12 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_unknown_devices(self) -> None:
"""
- Tests that a remove of a device that does not exist returns HTTPStatus.OK.
+ Tests that a remove of a device that does not exist returns 200.
"""
channel = self.make_request(
"POST",
@@ -503,8 +505,8 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
content={"devices": ["unknown_device1", "unknown_device2"]},
)
- # Delete unknown devices returns status HTTPStatus.OK
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ # Delete unknown devices returns status 200
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def test_delete_devices(self) -> None:
"""
@@ -533,7 +535,7 @@ class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
content={"devices": device_ids},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
self.assertEqual(0, len(res))
diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index 4f89f8b534..8a4e5c3f77 100644
--- a/tests/rest/admin/test_event_reports.py
+++ b/tests/rest/admin/test_event_reports.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from typing import List
from twisted.test.proto_helpers import MemoryReactor
@@ -81,16 +80,12 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, an error 403 is returned.
"""
channel = self.make_request(
@@ -99,11 +94,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_default_success(self) -> None:
@@ -117,7 +108,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 20)
self.assertNotIn("next_token", channel.json_body)
@@ -134,7 +125,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 5)
self.assertEqual(channel.json_body["next_token"], 5)
@@ -151,7 +142,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 15)
self.assertNotIn("next_token", channel.json_body)
@@ -168,7 +159,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(channel.json_body["next_token"], 15)
self.assertEqual(len(channel.json_body["event_reports"]), 10)
@@ -185,7 +176,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 10)
self.assertEqual(len(channel.json_body["event_reports"]), 10)
self.assertNotIn("next_token", channel.json_body)
@@ -205,7 +196,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 10)
self.assertEqual(len(channel.json_body["event_reports"]), 10)
self.assertNotIn("next_token", channel.json_body)
@@ -225,7 +216,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 5)
self.assertEqual(len(channel.json_body["event_reports"]), 5)
self.assertNotIn("next_token", channel.json_body)
@@ -247,7 +238,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 20)
report = 1
@@ -265,7 +256,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 20)
report = 1
@@ -278,7 +269,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
def test_invalid_search_order(self) -> None:
"""
- Testing that a invalid search order returns a HTTPStatus.BAD_REQUEST
+ Testing that an invalid search order returns a 400
"""
channel = self.make_request(
@@ -287,17 +278,13 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual("Unknown direction: bar", channel.json_body["error"])
def test_limit_is_negative(self) -> None:
"""
- Testing that a negative limit parameter returns a HTTPStatus.BAD_REQUEST
+ Testing that a negative limit parameter returns a 400
"""
channel = self.make_request(
@@ -306,16 +293,12 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_from_is_negative(self) -> None:
"""
- Testing that a negative from parameter returns a HTTPStatus.BAD_REQUEST
+ Testing that a negative from parameter returns a 400
"""
channel = self.make_request(
@@ -324,11 +307,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_next_token(self) -> None:
@@ -344,7 +323,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 20)
self.assertNotIn("next_token", channel.json_body)
@@ -357,7 +336,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 20)
self.assertNotIn("next_token", channel.json_body)
@@ -370,7 +349,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 19)
self.assertEqual(channel.json_body["next_token"], 19)
@@ -384,7 +363,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["event_reports"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -400,7 +379,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
{"score": -100, "reason": "this makes me sad"},
access_token=user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def _create_event_and_report_without_parameters(
self, room_id: str, user_tok: str
@@ -415,7 +394,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
{},
access_token=user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def _check_fields(self, content: List[JsonDict]) -> None:
"""Checks that all attributes are present in an event report"""
@@ -431,6 +410,33 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
self.assertIn("score", c)
self.assertIn("reason", c)
+ def test_count_correct_despite_table_deletions(self) -> None:
+ """
+ Tests that the count matches the number of rows, even if rows in joined tables
+ are missing.
+ """
+
+ # Delete rows from room_stats_state for one of our rooms.
+ self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_delete(
+ "room_stats_state", {"room_id": self.room_id1}, desc="_"
+ )
+ )
+
+ channel = self.make_request(
+ "GET",
+ self.url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ # The 'total' field is 10 because only 10 reports will actually
+ # be retrievable since we deleted the rows in the room_stats_state
+ # table.
+ self.assertEqual(channel.json_body["total"], 10)
+ # This is consistent with the number of rows actually returned.
+ self.assertEqual(len(channel.json_body["event_reports"]), 10)
+
class EventReportDetailTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -466,16 +472,12 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, an error 403 is returned.
"""
channel = self.make_request(
@@ -484,11 +486,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_default_success(self) -> None:
@@ -502,12 +500,12 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self._check_fields(channel.json_body)
def test_invalid_report_id(self) -> None:
"""
- Testing that an invalid `report_id` returns a HTTPStatus.BAD_REQUEST.
+ Testing that an invalid `report_id` returns a 400.
"""
# `report_id` is negative
@@ -517,11 +515,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"The report_id parameter must be a string representing a positive integer.",
@@ -535,11 +529,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"The report_id parameter must be a string representing a positive integer.",
@@ -553,11 +543,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"The report_id parameter must be a string representing a positive integer.",
@@ -566,7 +552,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
def test_report_id_not_found(self) -> None:
"""
- Testing that a not existing `report_id` returns a HTTPStatus.NOT_FOUND.
+ Testing that a non-existent `report_id` returns a 404.
"""
channel = self.make_request(
@@ -575,11 +561,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.NOT_FOUND,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
self.assertEqual("Event report not found", channel.json_body["error"])
@@ -594,7 +576,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
{"score": -100, "reason": "this makes me sad"},
access_token=user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def _check_fields(self, content: JsonDict) -> None:
"""Checks that all attributes are present in a event report"""
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
index 929bbdc37d..4c7864c629 100644
--- a/tests/rest/admin/test_federation.py
+++ b/tests/rest/admin/test_federation.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from typing import List, Optional
from parameterized import parameterized
@@ -64,7 +63,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -77,7 +76,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
@@ -87,7 +86,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# unknown order_by
@@ -97,7 +96,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
@@ -107,7 +106,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid destination
@@ -117,7 +116,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
# invalid destination
@@ -127,7 +126,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_limit(self) -> None:
@@ -142,7 +141,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 5)
self.assertEqual(channel.json_body["next_token"], "5")
@@ -160,7 +159,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 15)
self.assertNotIn("next_token", channel.json_body)
@@ -178,7 +177,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(channel.json_body["next_token"], "15")
self.assertEqual(len(channel.json_body["destinations"]), 10)
@@ -198,7 +197,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), number_destinations)
self.assertNotIn("next_token", channel.json_body)
@@ -211,7 +210,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), number_destinations)
self.assertNotIn("next_token", channel.json_body)
@@ -224,7 +223,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 19)
self.assertEqual(channel.json_body["next_token"], "19")
@@ -238,7 +237,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -255,7 +254,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_destinations, len(channel.json_body["destinations"]))
self.assertEqual(number_destinations, channel.json_body["total"])
@@ -290,7 +289,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_destination_list))
returned_order = [
@@ -376,7 +375,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Check that destinations were returned
self.assertTrue("destinations" in channel.json_body)
@@ -418,7 +417,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("sub0.example.com", channel.json_body["destination"])
# Check that all fields are available
@@ -435,7 +434,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("sub0.example.com", channel.json_body["destination"])
self.assertEqual(0, channel.json_body["retry_last_ts"])
self.assertEqual(0, channel.json_body["retry_interval"])
@@ -452,7 +451,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
retry_timings = self.get_success(
self.store.get_destination_retry_timings("sub0.example.com")
@@ -469,7 +468,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"The retry timing does not need to be reset for this destination.",
channel.json_body["error"],
@@ -561,7 +560,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -574,7 +573,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
@@ -584,7 +583,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
@@ -594,7 +593,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid destination
@@ -604,7 +603,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_limit(self) -> None:
@@ -619,7 +618,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 3)
self.assertEqual(channel.json_body["next_token"], "3")
@@ -637,7 +636,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 5)
self.assertNotIn("next_token", channel.json_body)
@@ -655,7 +654,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(channel.json_body["next_token"], "8")
self.assertEqual(len(channel.json_body["rooms"]), 5)
@@ -673,7 +672,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel_asc.code, msg=channel_asc.json_body)
+ self.assertEqual(200, channel_asc.code, msg=channel_asc.json_body)
self.assertEqual(channel_asc.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel_asc.json_body["rooms"]))
self._check_fields(channel_asc.json_body["rooms"])
@@ -685,7 +684,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel_desc.code, msg=channel_desc.json_body)
+ self.assertEqual(200, channel_desc.code, msg=channel_desc.json_body)
self.assertEqual(channel_desc.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel_desc.json_body["rooms"]))
self._check_fields(channel_desc.json_body["rooms"])
@@ -711,7 +710,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
self.assertNotIn("next_token", channel.json_body)
@@ -724,7 +723,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
self.assertNotIn("next_token", channel.json_body)
@@ -737,7 +736,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 4)
self.assertEqual(channel.json_body["next_token"], "4")
@@ -751,7 +750,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -767,7 +766,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel.json_body["rooms"]))
self._check_fields(channel.json_body["rooms"])
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index e909e444ac..aadb31ca83 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
-from http import HTTPStatus
from parameterized import parameterized
@@ -60,7 +59,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
channel = self.make_request("DELETE", url, b"{}")
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -81,16 +80,12 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_media_does_not_exist(self) -> None:
"""
- Tests that a lookup for a media that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for media that does not exist returns a 404
"""
url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, "12345")
@@ -100,12 +95,12 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_media_is_not_local(self) -> None:
"""
- Tests that a lookup for a media that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for media that is not local returns a 400
"""
url = "/_synapse/admin/v1/media/%s/%s" % ("unknown_domain", "12345")
@@ -115,7 +110,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only delete local media", channel.json_body["error"])
def test_delete_media(self) -> None:
@@ -131,7 +126,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
upload_resource,
SMALL_PNG,
tok=self.admin_user_tok,
- expect_code=HTTPStatus.OK,
+ expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
@@ -151,11 +146,10 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
# Should be successful
self.assertEqual(
- HTTPStatus.OK,
+ 200,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.OK on accessing media: %s"
- % server_and_media_id
+ "Expected to receive a 200 on accessing media: %s" % server_and_media_id
),
)
@@ -172,7 +166,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
media_id,
@@ -189,10 +183,10 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
self.assertEqual(
- HTTPStatus.NOT_FOUND,
+ 404,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"
+ "Expected to receive a 404 on accessing deleted media: %s"
% server_and_media_id
),
)
@@ -231,11 +225,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, b"{}")
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
@@ -251,16 +241,12 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_media_is_not_local(self) -> None:
"""
- Tests that a lookup for media that is not local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for media that is not local returns a 400
"""
url = "/_synapse/admin/v1/media/%s/delete" % "unknown_domain"
@@ -270,7 +256,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only delete local media", channel.json_body["error"])
def test_missing_parameter(self) -> None:
@@ -283,11 +269,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Missing integer query parameter 'before_ts'", channel.json_body["error"]
@@ -303,11 +285,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter before_ts must be a positive integer.",
@@ -320,11 +298,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter before_ts you provided is from the year 1970. "
@@ -338,11 +312,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter size_gt must be a string representing a positive integer.",
@@ -355,11 +325,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Boolean query parameter 'keep_profiles' must be one of ['true', 'false']",
@@ -388,7 +354,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
media_id,
@@ -413,7 +379,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
@@ -425,7 +391,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
@@ -449,7 +415,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&size_gt=67",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
@@ -460,7 +426,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&size_gt=66",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
@@ -485,7 +451,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
content={"avatar_url": "mxc://%s" % (server_and_media_id,)},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
now_ms = self.clock.time_msec()
channel = self.make_request(
@@ -493,7 +459,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
@@ -504,7 +470,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
@@ -530,7 +496,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
content={"url": "mxc://%s" % (server_and_media_id,)},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
now_ms = self.clock.time_msec()
channel = self.make_request(
@@ -538,7 +504,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=true",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
@@ -549,7 +515,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.url + "?before_ts=" + str(now_ms) + "&keep_profiles=false",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
@@ -569,7 +535,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
upload_resource,
SMALL_PNG,
tok=self.admin_user_tok,
- expect_code=HTTPStatus.OK,
+ expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
@@ -602,10 +568,10 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
if expect_success:
self.assertEqual(
- HTTPStatus.OK,
+ 200,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.OK on accessing media: %s"
+ "Expected to receive a 200 on accessing media: %s"
% server_and_media_id
),
)
@@ -613,10 +579,10 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.assertTrue(os.path.exists(local_path))
else:
self.assertEqual(
- HTTPStatus.NOT_FOUND,
+ 404,
channel.code,
msg=(
- "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"
+ "Expected to receive a 404 on accessing deleted media: %s"
% (server_and_media_id)
),
)
@@ -648,7 +614,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
upload_resource,
SMALL_PNG,
tok=self.admin_user_tok,
- expect_code=HTTPStatus.OK,
+ expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
@@ -668,11 +634,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
b"{}",
)
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["quarantine", "unquarantine"])
@@ -689,11 +651,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_quarantine_media(self) -> None:
@@ -712,7 +670,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
@@ -726,7 +684,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
@@ -753,7 +711,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
# verify that is not in quarantine
@@ -785,7 +743,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
upload_resource,
SMALL_PNG,
tok=self.admin_user_tok,
- expect_code=HTTPStatus.OK,
+ expect_code=200,
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
@@ -801,11 +759,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url % (action, self.media_id), b"{}")
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["protect", "unprotect"])
@@ -822,11 +776,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_protect_media(self) -> None:
@@ -845,7 +795,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
@@ -859,7 +809,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body)
media_info = self.get_success(self.store.get_local_media(self.media_id))
@@ -895,7 +845,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -914,11 +864,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -931,11 +877,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter before_ts must be a positive integer.",
@@ -948,11 +890,7 @@ class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
"Query parameter before_ts you provided is from the year 1970. "
diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py
index 8354250ec2..8f8abc21c7 100644
--- a/tests/rest/admin/test_registration_tokens.py
+++ b/tests/rest/admin/test_registration_tokens.py
@@ -13,7 +13,6 @@
# limitations under the License.
import random
import string
-from http import HTTPStatus
from typing import Optional
from twisted.test.proto_helpers import MemoryReactor
@@ -74,11 +73,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
def test_create_no_auth(self) -> None:
"""Try to create a token without authentication."""
channel = self.make_request("POST", self.url + "/new", {})
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_create_requester_not_admin(self) -> None:
@@ -89,11 +84,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{},
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_create_using_defaults(self) -> None:
@@ -105,7 +96,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(len(channel.json_body["token"]), 16)
self.assertIsNone(channel.json_body["uses_allowed"])
self.assertIsNone(channel.json_body["expiry_time"])
@@ -129,7 +120,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["token"], token)
self.assertEqual(channel.json_body["uses_allowed"], 1)
self.assertEqual(channel.json_body["expiry_time"], data["expiry_time"])
@@ -150,7 +141,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(len(channel.json_body["token"]), 16)
self.assertIsNone(channel.json_body["uses_allowed"])
self.assertIsNone(channel.json_body["expiry_time"])
@@ -168,11 +159,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_create_token_invalid_chars(self) -> None:
@@ -188,11 +175,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_create_token_already_exists(self) -> None:
@@ -207,7 +190,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
data,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel1.code, msg=channel1.json_body)
+ self.assertEqual(200, channel1.code, msg=channel1.json_body)
channel2 = self.make_request(
"POST",
@@ -215,7 +198,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
data,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel2.code, msg=channel2.json_body)
+ self.assertEqual(400, channel2.code, msg=channel2.json_body)
self.assertEqual(channel2.json_body["errcode"], Codes.INVALID_PARAM)
def test_create_unable_to_generate_token(self) -> None:
@@ -251,7 +234,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": 0},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["uses_allowed"], 0)
# Should fail with negative integer
@@ -262,7 +245,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
self.assertEqual(
- HTTPStatus.BAD_REQUEST,
+ 400,
channel.code,
msg=channel.json_body,
)
@@ -275,11 +258,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": 1.5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_create_expiry_time(self) -> None:
@@ -291,11 +270,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": self.clock.time_msec() - 10000},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with float
@@ -305,11 +280,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": self.clock.time_msec() + 1000000.5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_create_length(self) -> None:
@@ -321,7 +292,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"length": 64},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(len(channel.json_body["token"]), 64)
# Should fail with 0
@@ -331,11 +302,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"length": 0},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with a negative integer
@@ -345,11 +312,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"length": -5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with a float
@@ -359,11 +322,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"length": 8.5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with 65
@@ -373,11 +332,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"length": 65},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# UPDATING
@@ -389,11 +344,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
self.url + "/1234", # Token doesn't exist but that doesn't matter
{},
)
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_update_requester_not_admin(self) -> None:
@@ -404,11 +355,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{},
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_update_non_existent(self) -> None:
@@ -420,11 +367,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.NOT_FOUND,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
def test_update_uses_allowed(self) -> None:
@@ -439,7 +382,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": 1},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["uses_allowed"], 1)
self.assertIsNone(channel.json_body["expiry_time"])
@@ -450,7 +393,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": 0},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["uses_allowed"], 0)
self.assertIsNone(channel.json_body["expiry_time"])
@@ -461,7 +404,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": None},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIsNone(channel.json_body["uses_allowed"])
self.assertIsNone(channel.json_body["expiry_time"])
@@ -472,11 +415,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": 1.5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with a negative integer
@@ -486,11 +425,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"uses_allowed": -5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_update_expiry_time(self) -> None:
@@ -506,7 +441,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": new_expiry_time},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["expiry_time"], new_expiry_time)
self.assertIsNone(channel.json_body["uses_allowed"])
@@ -517,7 +452,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": None},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIsNone(channel.json_body["expiry_time"])
self.assertIsNone(channel.json_body["uses_allowed"])
@@ -529,11 +464,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": past_time},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# Should fail with a float
@@ -543,11 +474,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{"expiry_time": new_expiry_time + 0.5},
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
def test_update_both(self) -> None:
@@ -568,7 +495,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["uses_allowed"], 1)
self.assertEqual(channel.json_body["expiry_time"], new_expiry_time)
@@ -589,11 +516,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
# DELETING
@@ -605,11 +528,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
self.url + "/1234", # Token doesn't exist but that doesn't matter
{},
)
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_delete_requester_not_admin(self) -> None:
@@ -620,11 +539,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{},
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_delete_non_existent(self) -> None:
@@ -636,11 +551,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.NOT_FOUND,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
def test_delete(self) -> None:
@@ -655,7 +566,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# GETTING ONE
@@ -666,11 +577,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
self.url + "/1234", # Token doesn't exist but that doesn't matter
{},
)
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_get_requester_not_admin(self) -> None:
@@ -682,7 +589,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg=channel.json_body,
)
@@ -697,11 +604,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.NOT_FOUND,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
def test_get(self) -> None:
@@ -716,7 +619,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["token"], token)
self.assertIsNone(channel.json_body["uses_allowed"])
self.assertIsNone(channel.json_body["expiry_time"])
@@ -728,11 +631,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
def test_list_no_auth(self) -> None:
"""Try to list tokens without authentication."""
channel = self.make_request("GET", self.url, {})
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_list_requester_not_admin(self) -> None:
@@ -743,11 +642,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
{},
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_list_all(self) -> None:
@@ -762,7 +657,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(len(channel.json_body["registration_tokens"]), 1)
token_info = channel.json_body["registration_tokens"][0]
self.assertEqual(token_info["token"], token)
@@ -780,11 +675,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
def _test_list_query_parameter(self, valid: str) -> None:
"""Helper used to test both valid=true and valid=false."""
@@ -816,7 +707,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(len(channel.json_body["registration_tokens"]), 2)
token_info_1 = channel.json_body["registration_tokens"][0]
token_info_2 = channel.json_body["registration_tokens"][1]
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 989cbdb5e2..e0f5d54aba 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
+import time
import urllib.parse
-from http import HTTPStatus
from typing import List, Optional
from unittest.mock import Mock
@@ -23,10 +24,11 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes, Membership, RoomTypes
from synapse.api.errors import Codes
-from synapse.handlers.pagination import PaginationHandler
+from synapse.handlers.pagination import PaginationHandler, PurgeStatus
from synapse.rest.client import directory, events, login, room
from synapse.server import HomeServer
from synapse.util import Clock
+from synapse.util.stringutils import random_string
from tests import unittest
@@ -68,7 +70,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
def test_requester_is_no_admin(self) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, an error 403 is returned.
"""
channel = self.make_request(
@@ -78,7 +80,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_room_does_not_exist(self) -> None:
@@ -94,11 +96,11 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def test_room_is_not_valid(self) -> None:
"""
- Check that invalid room names, return an error HTTPStatus.BAD_REQUEST.
+ Check that invalid room names return an error 400.
"""
url = "/_synapse/admin/v1/rooms/%s" % "invalidroom"
@@ -109,7 +111,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"invalidroom is not a legal room ID",
channel.json_body["error"],
@@ -127,7 +129,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("new_room_id", channel.json_body)
self.assertIn("kicked_users", channel.json_body)
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -145,7 +147,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"User must be our own: @not:exist.bla",
channel.json_body["error"],
@@ -163,7 +165,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_purge_is_not_bool(self) -> None:
@@ -178,7 +180,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_purge_room_and_block(self) -> None:
@@ -202,7 +204,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(None, channel.json_body["new_room_id"])
self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -233,7 +235,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(None, channel.json_body["new_room_id"])
self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -265,7 +267,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(None, channel.json_body["new_room_id"])
self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -296,7 +298,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
)
# The room is now blocked.
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self._is_blocked(room_id)
def test_shutdown_room_consent(self) -> None:
@@ -319,7 +321,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
self.room_id,
body="foo",
tok=self.other_user_tok,
- expect_code=HTTPStatus.FORBIDDEN,
+ expect_code=403,
)
# Test that room is not purged
@@ -337,7 +339,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
self.assertIn("new_room_id", channel.json_body)
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -366,7 +368,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
{"history_visibility": "world_readable"},
access_token=self.other_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Test that room is not purged
with self.assertRaises(AssertionError):
@@ -383,7 +385,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
self.assertIn("new_room_id", channel.json_body)
self.assertIn("failed_to_kick_users", channel.json_body)
@@ -398,7 +400,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
self._has_no_members(self.room_id)
# Assert we can no longer peek into the room
- self._assert_peek(self.room_id, expect_code=HTTPStatus.FORBIDDEN)
+ self._assert_peek(self.room_id, expect_code=403)
def _is_blocked(self, room_id: str, expect: bool = True) -> None:
"""Assert that the room is blocked or not"""
@@ -494,7 +496,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
)
def test_requester_is_no_admin(self, method: str, url: str) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, an error 403 is returned.
"""
channel = self.make_request(
@@ -504,7 +506,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_room_does_not_exist(self) -> None:
@@ -522,7 +524,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -533,7 +535,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, len(channel.json_body["results"]))
self.assertEqual("complete", channel.json_body["results"][0]["status"])
self.assertEqual(delete_id, channel.json_body["results"][0]["delete_id"])
@@ -546,7 +548,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
)
def test_room_is_not_valid(self, method: str, url: str) -> None:
"""
- Check that invalid room names, return an error HTTPStatus.BAD_REQUEST.
+ Check that invalid room names return an error 400.
"""
channel = self.make_request(
@@ -556,7 +558,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"invalidroom is not a legal room ID",
channel.json_body["error"],
@@ -574,7 +576,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -592,7 +594,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"User must be our own: @not:exist.bla",
channel.json_body["error"],
@@ -610,7 +612,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_purge_is_not_bool(self) -> None:
@@ -625,7 +627,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_delete_expired_status(self) -> None:
@@ -639,7 +641,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id1 = channel.json_body["delete_id"]
@@ -654,7 +656,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id2 = channel.json_body["delete_id"]
@@ -665,7 +667,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(2, len(channel.json_body["results"]))
self.assertEqual("complete", channel.json_body["results"][0]["status"])
self.assertEqual("complete", channel.json_body["results"][1]["status"])
@@ -682,7 +684,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, len(channel.json_body["results"]))
self.assertEqual("complete", channel.json_body["results"][0]["status"])
self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])
@@ -696,7 +698,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_delete_same_room_twice(self) -> None:
@@ -722,9 +724,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body
- )
+ self.assertEqual(400, second_channel.code, msg=second_channel.json_body)
self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"])
self.assertEqual(
f"History purge already in progress for {self.room_id}",
@@ -733,7 +733,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
# get result of first call
first_channel.await_result()
- self.assertEqual(HTTPStatus.OK, first_channel.code, msg=first_channel.json_body)
+ self.assertEqual(200, first_channel.code, msg=first_channel.json_body)
self.assertIn("delete_id", first_channel.json_body)
# check status after finish the task
@@ -764,7 +764,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -795,7 +795,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -827,7 +827,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -858,7 +858,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.room_id,
body="foo",
tok=self.other_user_tok,
- expect_code=HTTPStatus.FORBIDDEN,
+ expect_code=403,
)
# Test that room is not purged
@@ -876,7 +876,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -887,7 +887,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.url_status_by_room_id,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, len(channel.json_body["results"]))
# Test that member has moved to new room
@@ -914,7 +914,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
content={"history_visibility": "world_readable"},
access_token=self.other_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Test that room is not purged
with self.assertRaises(AssertionError):
@@ -931,7 +931,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("delete_id", channel.json_body)
delete_id = channel.json_body["delete_id"]
@@ -942,7 +942,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.url_status_by_room_id,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, len(channel.json_body["results"]))
# Test that member has moved to new room
@@ -955,7 +955,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self._has_no_members(self.room_id)
# Assert we can no longer peek into the room
- self._assert_peek(self.room_id, expect_code=HTTPStatus.FORBIDDEN)
+ self._assert_peek(self.room_id, expect_code=403)
def _is_blocked(self, room_id: str, expect: bool = True) -> None:
"""Assert that the room is blocked or not"""
@@ -1026,9 +1026,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
self.url_status_by_room_id,
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.OK, channel_room_id.code, msg=channel_room_id.json_body
- )
+ self.assertEqual(200, channel_room_id.code, msg=channel_room_id.json_body)
self.assertEqual(1, len(channel_room_id.json_body["results"]))
self.assertEqual(
delete_id, channel_room_id.json_body["results"][0]["delete_id"]
@@ -1041,7 +1039,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
self.assertEqual(
- HTTPStatus.OK,
+ 200,
channel_delete_id.code,
msg=channel_delete_id.json_body,
)
@@ -1085,7 +1083,9 @@ class RoomTestCase(unittest.HomeserverTestCase):
room_ids = []
for _ in range(total_rooms):
room_id = self.helper.create_room_as(
- self.admin_user, tok=self.admin_user_tok
+ self.admin_user,
+ tok=self.admin_user_tok,
+ is_public=True,
)
room_ids.append(room_id)
@@ -1100,7 +1100,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
)
# Check request completed successfully
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Check that response json body contains a "rooms" key
self.assertTrue(
@@ -1124,8 +1124,8 @@ class RoomTestCase(unittest.HomeserverTestCase):
self.assertIn("version", r)
self.assertIn("creator", r)
self.assertIn("encryption", r)
- self.assertIn("federatable", r)
- self.assertIn("public", r)
+ self.assertIs(r["federatable"], True)
+ self.assertIs(r["public"], True)
self.assertIn("join_rules", r)
self.assertIn("guest_access", r)
self.assertIn("history_visibility", r)
@@ -1186,7 +1186,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue("rooms" in channel.json_body)
for r in channel.json_body["rooms"]:
@@ -1226,7 +1226,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def test_correct_room_attributes(self) -> None:
"""Test the correct attributes for a room are returned"""
@@ -1253,7 +1253,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
{"room_id": room_id},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Set this new alias as the canonical alias for this room
self.helper.send_state(
@@ -1285,7 +1285,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Check that rooms were returned
self.assertTrue("rooms" in channel.json_body)
@@ -1341,7 +1341,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Check that rooms were returned
self.assertTrue("rooms" in channel.json_body)
@@ -1487,7 +1487,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
def _search_test(
expected_room_id: Optional[str],
search_term: str,
- expected_http_code: int = HTTPStatus.OK,
+ expected_http_code: int = 200,
) -> None:
"""Search for a room and check that the returned room's id is a match
@@ -1505,7 +1505,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
- if expected_http_code != HTTPStatus.OK:
+ if expected_http_code != 200:
return
# Check that rooms were returned
@@ -1548,7 +1548,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
_search_test(None, "foo")
_search_test(None, "bar")
- _search_test(None, "", expected_http_code=HTTPStatus.BAD_REQUEST)
+ _search_test(None, "", expected_http_code=400)
# Test that the whole room id returns the room
_search_test(room_id_1, room_id_1)
@@ -1585,15 +1585,19 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id"))
self.assertEqual("ж", channel.json_body["rooms"][0].get("name"))
def test_single_room(self) -> None:
"""Test that a single room can be requested correctly"""
# Create two test rooms
- room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
- room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
+ room_id_1 = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=True
+ )
+ room_id_2 = self.helper.create_room_as(
+ self.admin_user, tok=self.admin_user_tok, is_public=False
+ )
room_name_1 = "something"
room_name_2 = "else"
@@ -1618,7 +1622,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("room_id", channel.json_body)
self.assertIn("name", channel.json_body)
@@ -1638,7 +1642,11 @@ class RoomTestCase(unittest.HomeserverTestCase):
self.assertIn("history_visibility", channel.json_body)
self.assertIn("state_events", channel.json_body)
self.assertIn("room_type", channel.json_body)
+ self.assertIn("forgotten", channel.json_body)
+
self.assertEqual(room_id_1, channel.json_body["room_id"])
+ self.assertIs(True, channel.json_body["federatable"])
+ self.assertIs(True, channel.json_body["public"])
def test_single_room_devices(self) -> None:
"""Test that `joined_local_devices` can be requested correctly"""
@@ -1650,7 +1658,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["joined_local_devices"])
# Have another user join the room
@@ -1664,7 +1672,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(2, channel.json_body["joined_local_devices"])
# leave room
@@ -1676,7 +1684,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["joined_local_devices"])
def test_room_members(self) -> None:
@@ -1707,7 +1715,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertCountEqual(
["@admin:test", "@foo:test", "@bar:test"], channel.json_body["members"]
@@ -1720,7 +1728,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertCountEqual(
["@admin:test", "@bar:test", "@foobar:test"], channel.json_body["members"]
@@ -1738,7 +1746,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertIn("state", channel.json_body)
# testing that the state events match is painful and not done here. We assume that
# the create_room already does the right thing, so no need to verify that we got
@@ -1755,7 +1763,7 @@ class RoomTestCase(unittest.HomeserverTestCase):
{"room_id": room_id},
access_token=admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Set this new alias as the canonical alias for this room
self.helper.send_state(
@@ -1784,10 +1792,203 @@ class RoomTestCase(unittest.HomeserverTestCase):
# delete the rooms and get joined roomed membership
url = f"/_matrix/client/r0/rooms/{room_id}/joined_members"
channel = self.make_request("GET", url.encode("ascii"), access_token=user_tok)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+class RoomMessagesTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.user = self.register_user("foo", "pass")
+ self.user_tok = self.login("foo", "pass")
+ self.room_id = self.helper.create_room_as(self.user, tok=self.user_tok)
+
+ def test_timestamp_to_event(self) -> None:
+ """Test that providing the current timestamp can get the last event."""
+ self.helper.send(self.room_id, body="message 1", tok=self.user_tok)
+ second_event_id = self.helper.send(
+ self.room_id, body="message 2", tok=self.user_tok
+ )["event_id"]
+ ts = str(round(time.time() * 1000))
+
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/timestamp_to_event?dir=b&ts=%s"
+ % (self.room_id, ts),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code)
+ self.assertIn("event_id", channel.json_body)
+ self.assertEqual(second_event_id, channel.json_body["event_id"])
+
+ def test_topo_token_is_accepted(self) -> None:
+ """Test Topo Token is accepted."""
+ token = "t1-0_0_0_0_0_0_0_0_0"
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code)
+ self.assertIn("start", channel.json_body)
+ self.assertEqual(token, channel.json_body["start"])
+ self.assertIn("chunk", channel.json_body)
+ self.assertIn("end", channel.json_body)
+
+ def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None:
+ """Test that stream token is accepted for forward pagination."""
+ token = "s0_0_0_0_0_0_0_0_0"
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code)
+ self.assertIn("start", channel.json_body)
+ self.assertEqual(token, channel.json_body["start"])
+ self.assertIn("chunk", channel.json_body)
+ self.assertIn("end", channel.json_body)
+
+ def test_room_messages_backward(self) -> None:
+ """Test room messages can be retrieved by an admin that isn't in the room."""
+ latest_event_id = self.helper.send(
+ self.room_id, body="message 1", tok=self.user_tok
+ )["event_id"]
+
+ # Check that we get the first and second message when querying /messages.
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?dir=b" % (self.room_id,),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ chunk = channel.json_body["chunk"]
+ self.assertEqual(len(chunk), 6, [event["content"] for event in chunk])
+
+ # in backwards, this is the first event
+ self.assertEqual(chunk[0]["event_id"], latest_event_id)
+
+ def test_room_messages_forward(self) -> None:
+ """Test room messages can be retrieved by an admin that isn't in the room."""
+ latest_event_id = self.helper.send(
+ self.room_id, body="message 1", tok=self.user_tok
+ )["event_id"]
+
+ # Check that we get the first and second message when querying /messages.
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?dir=f" % (self.room_id,),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ chunk = channel.json_body["chunk"]
+ self.assertEqual(len(chunk), 6, [event["content"] for event in chunk])
+
+ # in forward, this is the last event
+ self.assertEqual(chunk[5]["event_id"], latest_event_id)
+
+ def test_room_messages_purge(self) -> None:
+ """Test room messages can be retrieved by an admin that isn't in the room."""
+ store = self.hs.get_datastores().main
+ pagination_handler = self.hs.get_pagination_handler()
+
+ # Send a first message in the room, which will be removed by the purge.
+ first_event_id = self.helper.send(
+ self.room_id, body="message 1", tok=self.user_tok
+ )["event_id"]
+ first_token = self.get_success(
+ store.get_topological_token_for_event(first_event_id)
+ )
+ first_token_str = self.get_success(first_token.to_string(store))
+
+ # Send a second message in the room, which won't be removed, and which we'll
+ # use as the marker to purge events before.
+ second_event_id = self.helper.send(
+ self.room_id, body="message 2", tok=self.user_tok
+ )["event_id"]
+ second_token = self.get_success(
+ store.get_topological_token_for_event(second_event_id)
+ )
+ second_token_str = self.get_success(second_token.to_string(store))
+
+ # Send a third event in the room to ensure we don't fall under any edge case
+ # due to our marker being the latest forward extremity in the room.
+ self.helper.send(self.room_id, body="message 3", tok=self.user_tok)
+
+ # Check that we get the first and second message when querying /messages.
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s"
+ % (
+ self.room_id,
+ second_token_str,
+ json.dumps({"types": [EventTypes.Message]}),
+ ),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ chunk = channel.json_body["chunk"]
+ self.assertEqual(len(chunk), 2, [event["content"] for event in chunk])
+
+ # Purge every event before the second event.
+ purge_id = random_string(16)
+ pagination_handler._purges_by_id[purge_id] = PurgeStatus()
+ self.get_success(
+ pagination_handler._purge_history(
+ purge_id=purge_id,
+ room_id=self.room_id,
+ token=second_token_str,
+ delete_local_events=True,
+ )
+ )
+
+ # Check that we only get the second message through /message now that the first
+ # has been purged.
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s"
+ % (
+ self.room_id,
+ second_token_str,
+ json.dumps({"types": [EventTypes.Message]}),
+ ),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ chunk = channel.json_body["chunk"]
+ self.assertEqual(len(chunk), 1, [event["content"] for event in chunk])
+
+ # Check that we get no event, but also no error, when querying /messages with
+ # the token that was pointing at the first event, because we don't have it
+ # anymore.
+ channel = self.make_request(
+ "GET",
+ "/_synapse/admin/v1/rooms/%s/messages?from=%s&dir=b&filter=%s"
+ % (
+ self.room_id,
+ first_token_str,
+ json.dumps({"types": [EventTypes.Message]}),
+ ),
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ chunk = channel.json_body["chunk"]
+ self.assertEqual(len(chunk), 0, [event["content"] for event in chunk])
+
+
class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -1813,7 +2014,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
def test_requester_is_no_admin(self) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+ If the user is not a server admin, an error 403 is returned.
"""
channel = self.make_request(
@@ -1823,7 +2024,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.second_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -1838,12 +2039,12 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
def test_local_user_does_not_exist(self) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
channel = self.make_request(
@@ -1853,7 +2054,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_remote_user(self) -> None:
@@ -1868,7 +2069,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"This endpoint can only be used with local users",
channel.json_body["error"],
@@ -1876,7 +2077,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
def test_room_does_not_exist(self) -> None:
"""
- Check that unknown rooms/server return error HTTPStatus.NOT_FOUND.
+ Check that unknown rooms/servers return a 404 error.
"""
url = "/_synapse/admin/v1/join/!unknown:test"
@@ -1887,7 +2088,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(
"Can't join remote room because no servers that are in the room have been provided.",
channel.json_body["error"],
@@ -1895,7 +2096,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
def test_room_is_not_valid(self) -> None:
"""
- Check that invalid room names, return an error HTTPStatus.BAD_REQUEST.
+ Check that invalid room names return an error 400.
"""
url = "/_synapse/admin/v1/join/invalidroom"
@@ -1906,7 +2107,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"invalidroom was not legal room ID or room alias",
channel.json_body["error"],
@@ -1924,7 +2125,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.public_room_id, channel.json_body["room_id"])
# Validate if user is a member of the room
@@ -1934,7 +2135,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
"/_matrix/client/r0/joined_rooms",
access_token=self.second_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0])
def test_join_private_room_if_not_member(self) -> None:
@@ -1954,7 +2155,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_join_private_room_if_member(self) -> None:
@@ -1982,7 +2183,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
"/_matrix/client/r0/joined_rooms",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
# Join user to room.
@@ -1995,7 +2196,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
content={"user_id": self.second_user_id},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["room_id"])
# Validate if user is a member of the room
@@ -2005,7 +2206,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
"/_matrix/client/r0/joined_rooms",
access_token=self.second_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
def test_join_private_room_if_owner(self) -> None:
@@ -2025,7 +2226,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["room_id"])
# Validate if user is a member of the room
@@ -2035,7 +2236,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
"/_matrix/client/r0/joined_rooms",
access_token=self.second_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
def test_context_as_non_admin(self) -> None:
@@ -2069,7 +2270,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
% (room_id, events[midway]["event_id"]),
access_token=tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_context_as_admin(self) -> None:
@@ -2099,7 +2300,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
% (room_id, events[midway]["event_id"]),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body["event"]["event_id"], events[midway]["event_id"]
)
@@ -2158,7 +2359,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Now we test that we can join the room and ban a user.
self.helper.join(room_id, self.admin_user, tok=self.admin_user_tok)
@@ -2185,7 +2386,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Now we test that we can join the room (we should have received an
# invite) and can ban a user.
@@ -2211,7 +2412,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Now we test that we can join the room and ban a user.
self.helper.join(room_id, self.second_user_id, tok=self.second_tok)
@@ -2245,11 +2446,11 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- # We expect this to fail with a HTTPStatus.BAD_REQUEST as there are no room admins.
+ # We expect this to fail with a 400 as there are no room admins.
#
# (Note we assert the error message to ensure that it's not denied for
# some other reason)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body["error"],
"No local admin user in room with power to update power levels.",
@@ -2279,7 +2480,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
@parameterized.expand([("PUT",), ("GET",)])
def test_requester_is_no_admin(self, method: str) -> None:
- """If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned."""
+ """If the user is not a server admin, an error 403 is returned."""
channel = self.make_request(
method,
@@ -2288,12 +2489,12 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
@parameterized.expand([("PUT",), ("GET",)])
def test_room_is_not_valid(self, method: str) -> None:
- """Check that invalid room names, return an error HTTPStatus.BAD_REQUEST."""
+ """Check that invalid room names, return an error 400."""
channel = self.make_request(
method,
@@ -2302,7 +2503,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"invalidroom is not a legal room ID",
channel.json_body["error"],
@@ -2319,7 +2520,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
# `block` is not set
@@ -2330,7 +2531,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
# no content is send
@@ -2340,7 +2541,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_JSON, channel.json_body["errcode"])
def test_block_room(self) -> None:
@@ -2354,7 +2555,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
content={"block": True},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["block"])
self._is_blocked(room_id, expect=True)
@@ -2378,7 +2579,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
content={"block": True},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["block"])
self._is_blocked(self.room_id, expect=True)
@@ -2394,7 +2595,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
content={"block": False},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body["block"])
self._is_blocked(room_id, expect=False)
@@ -2418,7 +2619,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
content={"block": False},
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body["block"])
self._is_blocked(self.room_id, expect=False)
@@ -2433,7 +2634,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
self.url % room_id,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["block"])
self.assertEqual(self.other_user, channel.json_body["user_id"])
@@ -2457,7 +2658,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
self.url % room_id,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertFalse(channel.json_body["block"])
self.assertNotIn("user_id", channel.json_body)
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index dbcba2663c..a2f347f666 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from typing import List
from twisted.test.proto_helpers import MemoryReactor
@@ -57,7 +56,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url)
self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
+ 401,
channel.code,
msg=channel.json_body,
)
@@ -72,7 +71,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- HTTPStatus.FORBIDDEN,
+ 403,
channel.code,
msg=channel.json_body,
)
@@ -80,7 +79,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
@override_config({"server_notices": {"system_mxid_localpart": "notices"}})
def test_user_does_not_exist(self) -> None:
- """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND"""
+ """Tests that a lookup for a user that does not exist returns a 404"""
channel = self.make_request(
"POST",
self.url,
@@ -88,13 +87,13 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
content={"user_id": "@unknown_person:test", "content": ""},
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@override_config({"server_notices": {"system_mxid_localpart": "notices"}})
def test_user_is_not_local(self) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
channel = self.make_request(
"POST",
@@ -106,7 +105,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"Server notices can only be sent to local users", channel.json_body["error"]
)
@@ -122,7 +121,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_JSON, channel.json_body["errcode"])
# no content
@@ -133,7 +132,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
content={"user_id": self.other_user},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
# no body
@@ -144,7 +143,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
content={"user_id": self.other_user, "content": ""},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
self.assertEqual("'body' not in content", channel.json_body["error"])
@@ -156,10 +155,66 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
content={"user_id": self.other_user, "content": {"body": ""}},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
self.assertEqual("'msgtype' not in content", channel.json_body["error"])
+ @override_config(
+ {
+ "server_notices": {
+ "system_mxid_localpart": "notices",
+ "system_mxid_avatar_url": "somthingwrong",
+ },
+ "max_avatar_size": "10M",
+ }
+ )
+ def test_invalid_avatar_url(self) -> None:
+        """If the avatar URL in homeserver.yaml is invalid and
+ "check avatar size and mime type" is set, an error is returned.
+ TODO: Should be checked when reading the configuration."""
+ channel = self.make_request(
+ "POST",
+ self.url,
+ access_token=self.admin_user_tok,
+ content={
+ "user_id": self.other_user,
+ "content": {"msgtype": "m.text", "body": "test msg"},
+ },
+ )
+
+ self.assertEqual(500, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
+
+ @override_config(
+ {
+ "server_notices": {
+ "system_mxid_localpart": "notices",
+ "system_mxid_display_name": "test display name",
+ "system_mxid_avatar_url": None,
+ },
+ "max_avatar_size": "10M",
+ }
+ )
+ def test_displayname_is_set_avatar_is_none(self) -> None:
+ """
+        Tests that sending a server notice succeeds
+ if a display_name is set, avatar_url is `None` and
+ "check avatar size and mime type" is set.
+ """
+ channel = self.make_request(
+ "POST",
+ self.url,
+ access_token=self.admin_user_tok,
+ content={
+ "user_id": self.other_user,
+ "content": {"msgtype": "m.text", "body": "test msg"},
+ },
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # user has one invite
+ self._check_invite_and_join_status(self.other_user, 1, 0)
+
def test_server_notice_disabled(self) -> None:
"""Tests that server returns error if server notice is disabled"""
channel = self.make_request(
@@ -172,7 +227,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
self.assertEqual(
"Server notices are not enabled on this server", channel.json_body["error"]
@@ -197,7 +252,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg one"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -226,7 +281,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg two"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has no new invites or memberships
self._check_invite_and_join_status(self.other_user, 0, 1)
@@ -260,7 +315,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg one"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -301,7 +356,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg two"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -341,7 +396,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg one"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -388,7 +443,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
"content": {"msgtype": "m.text", "body": "test msg two"},
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
@@ -538,7 +593,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"GET", "/_matrix/client/r0/sync", access_token=token
)
- self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertEqual(channel.code, 200)
# Get the messages
room = channel.json_body["rooms"]["join"][room_id]
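The server-notice tests above POST a `user_id` and a message `content` to the admin send-server-notice endpoint, expecting 200 on success and 400/403/404 for the various error cases. A hedged sketch of the same request made outside the test harness, assuming the endpoint path `/_synapse/admin/v1/send_server_notice` (the value behind `self.url` in these tests); the homeserver URL, token and target user are placeholders:

```python
# Illustrative only: BASE_URL, ADMIN_TOKEN and the target user are placeholders.
import requests

BASE_URL = "https://homeserver.example"
ADMIN_TOKEN = "syt_admin_access_token"

resp = requests.post(
    f"{BASE_URL}/_synapse/admin/v1/send_server_notice",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={
        "user_id": "@alice:example.org",
        "content": {"msgtype": "m.text", "body": "Maintenance at 02:00 UTC"},
    },
    timeout=10,
)

# 200 mirrors the success cases asserted above; 400 (bad body / notices
# disabled), 403 (non-admin token) and 404 (unknown local user) mirror the
# error cases.
print(resp.status_code, resp.json())
```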
diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py
index 7cb8ec57ba..b60f16b914 100644
--- a/tests/rest/admin/test_statistics.py
+++ b/tests/rest/admin/test_statistics.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
@@ -51,16 +50,12 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(
- HTTPStatus.UNAUTHORIZED,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
"""
- If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned.
+        If the user is not a server admin, a 403 error is returned.
"""
channel = self.make_request(
"GET",
@@ -69,11 +64,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
- self.assertEqual(
- HTTPStatus.FORBIDDEN,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -87,11 +78,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
@@ -101,11 +88,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative limit
@@ -115,11 +98,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from_ts
@@ -129,11 +108,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative until_ts
@@ -143,11 +118,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# until_ts smaller from_ts
@@ -157,11 +128,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# empty search term
@@ -171,11 +138,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
@@ -185,11 +148,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_limit(self) -> None:
@@ -204,7 +163,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 10)
self.assertEqual(len(channel.json_body["users"]), 5)
self.assertEqual(channel.json_body["next_token"], 5)
@@ -222,7 +181,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(len(channel.json_body["users"]), 15)
self.assertNotIn("next_token", channel.json_body)
@@ -240,7 +199,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
self.assertEqual(channel.json_body["next_token"], 15)
self.assertEqual(len(channel.json_body["users"]), 10)
@@ -262,7 +221,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
@@ -275,7 +234,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
@@ -288,7 +247,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 19)
self.assertEqual(channel.json_body["next_token"], 19)
@@ -301,7 +260,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -318,7 +277,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["users"]))
@@ -415,7 +374,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["users"][0]["media_count"], 3)
# filter media starting at `ts1` after creating first media
@@ -425,7 +384,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?from_ts=%s" % (ts1,),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 0)
self._create_media(self.other_user_tok, 3)
@@ -440,7 +399,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?from_ts=%s&until_ts=%s" % (ts1, ts2),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["users"][0]["media_count"], 3)
# filter media until `ts2` and earlier
@@ -449,7 +408,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?until_ts=%s" % (ts2,),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["users"][0]["media_count"], 6)
def test_search_term(self) -> None:
@@ -461,7 +420,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 20)
# filter user 1 and 10-19 by `user_id`
@@ -470,7 +429,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?search_term=foo_user_1",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 11)
# filter on this user in `displayname`
@@ -479,7 +438,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?search_term=bar_user_10",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["users"][0]["displayname"], "bar_user_10")
self.assertEqual(channel.json_body["total"], 1)
@@ -489,7 +448,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
self.url + "?search_term=foobar",
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 0)
def _create_users_with_media(self, number_users: int, media_per_user: int) -> None:
@@ -515,7 +474,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
for _ in range(number_media):
# Upload some media into the room
self.helper.upload_media(
- upload_resource, SMALL_PNG, tok=user_token, expect_code=HTTPStatus.OK
+ upload_resource, SMALL_PNG, tok=user_token, expect_code=200
)
def _check_fields(self, content: List[JsonDict]) -> None:
@@ -549,7 +508,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
url.encode("ascii"),
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_user_list))
returned_order = [row["user_id"] for row in channel.json_body["users"]]
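The statistics tests above page through and filter the per-user media statistics admin API and assert on the `total`, `users` and `next_token` fields of the response. A hedged sketch of an equivalent query, assuming the endpoint path `/_synapse/admin/v1/statistics/users/media` (what `self.url` names in this test case); the homeserver URL and token are placeholders:

```python
# Illustrative only: BASE_URL and ADMIN_TOKEN are placeholders.
import requests

BASE_URL = "https://homeserver.example"
ADMIN_TOKEN = "syt_admin_access_token"

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v1/statistics/users/media",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    # The same pagination and filter parameters the tests exercise
    # (limit/from, from_ts/until_ts, search_term, order_by, dir).
    params={"limit": 5, "from": 0, "search_term": "foo_user_1"},
    timeout=10,
)
resp.raise_for_status()

body = resp.json()
# The tests assert on these fields: "total", "users" and, when more rows
# remain, "next_token".
print(body["total"], len(body["users"]), body.get("next_token"))
```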
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 12db68d564..e8c9457794 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2018-2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@ import hmac
import os
import urllib.parse
from binascii import unhexlify
-from http import HTTPStatus
from typing import List, Optional
from unittest.mock import Mock, patch
@@ -26,13 +25,13 @@ from parameterized import parameterized, parameterized_class
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
-from synapse.api.constants import UserTypes
+from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes
from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
-from synapse.rest.client import devices, login, logout, profile, room, sync
+from synapse.rest.client import devices, login, logout, profile, register, room, sync
from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.server import HomeServer
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from tests import unittest
@@ -42,14 +41,12 @@ from tests.unittest import override_config
class UserRegisterTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
profile.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
self.url = "/_synapse/admin/v1/register"
self.registration_handler = Mock()
@@ -79,7 +76,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, b"{}")
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"Shared secret registration is not enabled", channel.json_body["error"]
)
@@ -111,7 +108,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
body = {"nonce": nonce}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("username must be specified", channel.json_body["error"])
# 61 seconds
@@ -119,7 +116,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("unrecognised nonce", channel.json_body["error"])
def test_register_incorrect_nonce(self) -> None:
@@ -142,7 +139,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("HMAC incorrect", channel.json_body["error"])
def test_register_correct_nonce(self) -> None:
@@ -169,7 +166,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["user_id"])
def test_nonce_reuse(self) -> None:
@@ -192,13 +189,13 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["user_id"])
# Now, try and reuse it
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("unrecognised nonce", channel.json_body["error"])
def test_missing_parts(self) -> None:
@@ -219,7 +216,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
# Must be an empty body present
channel = self.make_request("POST", self.url, {})
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("nonce must be specified", channel.json_body["error"])
#
@@ -229,28 +226,28 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
# Must be present
channel = self.make_request("POST", self.url, {"nonce": nonce()})
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("username must be specified", channel.json_body["error"])
# Must be a string
body = {"nonce": nonce(), "username": 1234}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid username", channel.json_body["error"])
# Must not have null bytes
body = {"nonce": nonce(), "username": "abcd\u0000"}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid username", channel.json_body["error"])
        # Must not be too long
body = {"nonce": nonce(), "username": "a" * 1000}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid username", channel.json_body["error"])
#
@@ -261,28 +258,28 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
body = {"nonce": nonce(), "username": "a"}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("password must be specified", channel.json_body["error"])
# Must be a string
body = {"nonce": nonce(), "username": "a", "password": 1234}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid password", channel.json_body["error"])
# Must not have null bytes
body = {"nonce": nonce(), "username": "a", "password": "abcd\u0000"}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid password", channel.json_body["error"])
# Super long
body = {"nonce": nonce(), "username": "a", "password": "A" * 1000}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid password", channel.json_body["error"])
#
@@ -298,7 +295,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Invalid user type", channel.json_body["error"])
def test_displayname(self) -> None:
@@ -323,11 +320,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob1:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob1:test/displayname")
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("bob1", channel.json_body["displayname"])
# displayname is None
@@ -347,11 +344,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob2:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob2:test/displayname")
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("bob2", channel.json_body["displayname"])
# displayname is empty
@@ -371,11 +368,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob3:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob3:test/displayname")
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
# set displayname
channel = self.make_request("GET", self.url)
@@ -394,11 +391,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob4:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob4:test/displayname")
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("Bob's Name", channel.json_body["displayname"])
@override_config(
@@ -442,12 +439,11 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request("POST", self.url, body)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["user_id"])
class UsersListTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -466,7 +462,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
@@ -478,7 +474,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
channel = self.make_request("GET", self.url, access_token=other_user_token)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_all_users(self) -> None:
@@ -494,7 +490,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(3, len(channel.json_body["users"]))
self.assertEqual(3, channel.json_body["total"])
@@ -508,7 +504,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
expected_user_id: Optional[str],
search_term: str,
search_field: Optional[str] = "name",
- expected_http_code: Optional[int] = HTTPStatus.OK,
+ expected_http_code: Optional[int] = 200,
) -> None:
"""Search for a user and check that the returned user's id is a match
@@ -530,7 +526,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
- if expected_http_code != HTTPStatus.OK:
+ if expected_http_code != 200:
return
# Check that users were returned
@@ -579,6 +575,16 @@ class UsersListTestCase(unittest.HomeserverTestCase):
_search_test(None, "foo", "user_id")
_search_test(None, "bar", "user_id")
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
def test_invalid_parameter(self) -> None:
"""
If parameters are invalid, an error is returned.
@@ -591,7 +597,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
@@ -601,7 +607,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid guests
@@ -611,7 +617,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid deactivated
@@ -621,7 +627,17 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+ # invalid approved
+ channel = self.make_request(
+ "GET",
+ self.url + "?approved=not_bool",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
        # unknown order_by
@@ -631,7 +647,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
@@ -641,7 +657,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_limit(self) -> None:
@@ -659,7 +675,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 5)
self.assertEqual(channel.json_body["next_token"], "5")
@@ -680,7 +696,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 15)
self.assertNotIn("next_token", channel.json_body)
@@ -701,7 +717,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(channel.json_body["next_token"], "15")
self.assertEqual(len(channel.json_body["users"]), 10)
@@ -724,7 +740,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
@@ -737,7 +753,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
@@ -750,7 +766,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 19)
self.assertEqual(channel.json_body["next_token"], "19")
@@ -764,7 +780,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -842,6 +858,99 @@ class UsersListTestCase(unittest.HomeserverTestCase):
self._order_test([self.admin_user, user1, user2], "creation_ts", "f")
self._order_test([user2, user1, self.admin_user], "creation_ts", "b")
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
+ def test_filter_out_approved(self) -> None:
+ """Tests that the endpoint can filter out approved users."""
+ # Create our users.
+ self._create_users(2)
+
+ # Get the list of users.
+ channel = self.make_request(
+ "GET",
+ self.url,
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, channel.result)
+
+ # Exclude the admin, because we don't want to accidentally un-approve the admin.
+ non_admin_user_ids = [
+ user["name"]
+ for user in channel.json_body["users"]
+ if user["name"] != self.admin_user
+ ]
+
+ self.assertEqual(2, len(non_admin_user_ids), non_admin_user_ids)
+
+ # Select a user and un-approve them. We do this rather than the other way around
+ # because, since these users are created by an admin, we consider them already
+ # approved.
+ not_approved_user = non_admin_user_ids[0]
+
+ channel = self.make_request(
+ "PUT",
+ f"/_synapse/admin/v2/users/{not_approved_user}",
+ {"approved": False},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, channel.result)
+
+ # Now get the list of users again, this time filtering out approved users.
+ channel = self.make_request(
+ "GET",
+ self.url + "?approved=false",
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, channel.result)
+
+ non_admin_user_ids = [
+ user["name"]
+ for user in channel.json_body["users"]
+ if user["name"] != self.admin_user
+ ]
+
+ # We should only have our unapproved user now.
+ self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids)
+ self.assertEqual(not_approved_user, non_admin_user_ids[0])
+
+ def test_erasure_status(self) -> None:
+ # Create a new user.
+ user_id = self.register_user("eraseme", "eraseme")
+
+ # They should appear in the list users API, marked as not erased.
+ channel = self.make_request(
+ "GET",
+ self.url + "?deactivated=true",
+ access_token=self.admin_user_tok,
+ )
+ users = {user["name"]: user for user in channel.json_body["users"]}
+ self.assertIs(users[user_id]["erased"], False)
+
+ # Deactivate that user, requesting erasure.
+ deactivate_account_handler = self.hs.get_deactivate_account_handler()
+ self.get_success(
+ deactivate_account_handler.deactivate_account(
+ user_id, erase_data=True, requester=create_requester(user_id)
+ )
+ )
+
+ # Repeat the list users query. They should now be marked as erased.
+ channel = self.make_request(
+ "GET",
+ self.url + "?deactivated=true",
+ access_token=self.admin_user_tok,
+ )
+ users = {user["name"]: user for user in channel.json_body["users"]}
+ self.assertIs(users[user_id]["erased"], True)
+
def _order_test(
self,
expected_user_list: List[str],
@@ -867,7 +976,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_user_list))
returned_order = [row["name"] for row in channel.json_body["users"]]
@@ -905,11 +1014,100 @@ class UsersListTestCase(unittest.HomeserverTestCase):
)
-class DeactivateAccountTestCase(unittest.HomeserverTestCase):
+class UserDevicesTestCase(unittest.HomeserverTestCase):
+ """
+ Tests user device management-related Admin APIs.
+ """
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
+ sync.register_servlets,
+ ]
+
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ # Set up an Admin user to query the Admin API with.
+ self.admin_user_id = self.register_user("admin", "pass", admin=True)
+ self.admin_user_token = self.login("admin", "pass")
+
+ # Set up a test user to query the devices of.
+ self.other_user_device_id = "TESTDEVICEID"
+ self.other_user_device_display_name = "My Test Device"
+ self.other_user_client_ip = "1.2.3.4"
+ self.other_user_user_agent = "EquestriaTechnology/123.0"
+
+ self.other_user_id = self.register_user("user", "pass", displayname="User1")
+ self.other_user_token = self.login(
+ "user",
+ "pass",
+ device_id=self.other_user_device_id,
+ additional_request_fields={
+ "initial_device_display_name": self.other_user_device_display_name,
+ },
+ )
+
+ # Have the "other user" make a request so that the "last_seen_*" fields are
+ # populated in the tests below.
+ channel = self.make_request(
+ "GET",
+ "/_matrix/client/v3/sync",
+ access_token=self.other_user_token,
+ client_ip=self.other_user_client_ip,
+ custom_headers=[
+ ("User-Agent", self.other_user_user_agent),
+ ],
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ def test_list_user_devices(self) -> None:
+ """
+ Tests that a user's devices and attributes are listed correctly via the Admin API.
+ """
+ # Request all devices of "other user"
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v2/users/{self.other_user_id}/devices",
+ access_token=self.admin_user_token,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Double-check we got the single device expected
+ user_devices = channel.json_body["devices"]
+ self.assertEqual(len(user_devices), 1)
+ self.assertEqual(channel.json_body["total"], 1)
+
+ # Check that all the attributes of the device reported are as expected.
+ self._validate_attributes_of_device_response(user_devices[0])
+
+ # Request just a single device for "other user" by its ID
+ channel = self.make_request(
+ "GET",
+ f"/_synapse/admin/v2/users/{self.other_user_id}/devices/"
+ f"{self.other_user_device_id}",
+ access_token=self.admin_user_token,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ # Check that all the attributes of the device reported are as expected.
+ self._validate_attributes_of_device_response(channel.json_body)
+
+ def _validate_attributes_of_device_response(self, response: JsonDict) -> None:
+ # Check that all device expected attributes are present
+ self.assertEqual(response["user_id"], self.other_user_id)
+ self.assertEqual(response["device_id"], self.other_user_device_id)
+ self.assertEqual(response["display_name"], self.other_user_device_display_name)
+ self.assertEqual(response["last_seen_ip"], self.other_user_client_ip)
+ self.assertEqual(response["last_seen_user_agent"], self.other_user_user_agent)
+ self.assertIsInstance(response["last_seen_ts"], int)
+ self.assertGreater(response["last_seen_ts"], 0)
+
+
+class DeactivateAccountTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -941,7 +1139,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("POST", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self) -> None:
@@ -952,7 +1150,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
channel = self.make_request("POST", url, access_token=self.other_user_token)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
@@ -962,12 +1160,12 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
content=b"{}",
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self) -> None:
"""
- Tests that deactivation for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that deactivation for a user that does not exist returns a 404
"""
channel = self.make_request(
@@ -976,7 +1174,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_erase_is_not_bool(self) -> None:
@@ -991,18 +1189,18 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
- Tests that deactivation for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+        Tests that deactivation for a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/deactivate/@unknown_person:unknown_domain"
channel = self.make_request("POST", url, access_token=self.admin_user_tok)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only deactivate local users", channel.json_body["error"])
def test_deactivate_user_erase_true(self) -> None:
@@ -1017,12 +1215,13 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User1", channel.json_body["displayname"])
+ self.assertFalse(channel.json_body["erased"])
# Deactivate and erase user
channel = self.make_request(
@@ -1032,7 +1231,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
content={"erase": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Get user
channel = self.make_request(
@@ -1041,12 +1240,13 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertIsNone(channel.json_body["avatar_url"])
self.assertIsNone(channel.json_body["displayname"])
+ self.assertTrue(channel.json_body["erased"])
self._is_erased("@user:test", True)
@@ -1066,7 +1266,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"erase": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self._is_erased("@user:test", True)
def test_deactivate_user_erase_false(self) -> None:
@@ -1081,7 +1281,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1096,7 +1296,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
content={"erase": False},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Get user
channel = self.make_request(
@@ -1105,7 +1305,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1135,7 +1335,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -1150,7 +1350,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
content={"erase": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Get user
channel = self.make_request(
@@ -1159,7 +1359,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -1178,11 +1378,11 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
class UserRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
sync.register_servlets,
+ register.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -1220,7 +1420,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
@@ -1230,12 +1430,12 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=b"{}",
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
channel = self.make_request(
@@ -1244,7 +1444,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
@@ -1259,7 +1459,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"admin": "not_bool"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
# deactivated not bool
@@ -1269,7 +1469,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": "not_bool"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# password not str
@@ -1279,7 +1479,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"password": True},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# password not length
@@ -1289,7 +1489,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"password": "x" * 513},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# user_type not valid
@@ -1299,7 +1499,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"user_type": "new type"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# external_ids not valid
@@ -1311,7 +1511,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"external_ids": {"auth_provider": "prov", "wrong_external_id": "id"}
},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
channel = self.make_request(
@@ -1320,7 +1520,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"external_ids": {"external_id": "id"}},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
# threepids not valid
@@ -1330,7 +1530,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"threepids": {"medium": "email", "wrong_address": "id"}},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
channel = self.make_request(
@@ -1339,7 +1539,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"threepids": {"address": "value"}},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"])
def test_get_user(self) -> None:
@@ -1352,7 +1552,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("User", channel.json_body["displayname"])
self._check_fields(channel.json_body)
@@ -1379,7 +1579,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=body,
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1395,7 +1595,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1434,7 +1634,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=body,
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1458,7 +1658,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1486,7 +1686,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
# before limit of monthly active users is reached
channel = self.make_request("GET", "/sync", access_token=self.admin_user_tok)
- if channel.code != HTTPStatus.OK:
+ if channel.code != 200:
raise HttpResponseException(
channel.code, channel.result["reason"], channel.result["body"]
)
@@ -1512,7 +1712,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "abc123", "admin": False},
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@@ -1550,7 +1750,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
)
# Admin user is not blocked by mau anymore
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@@ -1585,7 +1785,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=body,
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1626,7 +1826,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=body,
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
@@ -1666,7 +1866,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content=body,
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"])
self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"])
@@ -1684,7 +1884,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "hahaha"},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self._check_fields(channel.json_body)
def test_set_displayname(self) -> None:
@@ -1700,7 +1900,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"displayname": "foobar"},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
@@ -1711,7 +1911,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
@@ -1733,7 +1933,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["threepids"]))
# the result does not always have the same sort order, so sort it before comparing
@@ -1759,7 +1959,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["threepids"]))
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1775,7 +1975,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["threepids"]))
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1791,7 +1991,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"threepids": []},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self._check_fields(channel.json_body)
@@ -1818,7 +2018,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(first_user, channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["threepids"]))
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1837,7 +2037,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["threepids"]))
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
@@ -1859,7 +2059,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
)
# the other user has these two threepids
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["threepids"]))
# the result does not always have the same sort order, so sort it before comparing
@@ -1878,7 +2078,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
url_first_user,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(first_user, channel.json_body["name"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self._check_fields(channel.json_body)
@@ -1907,7 +2107,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["external_ids"]))
# the result does not always have the same sort order, so sort it before comparing
@@ -1939,7 +2139,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -1958,7 +2158,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(2, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -1977,7 +2177,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"external_ids": []},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(0, len(channel.json_body["external_ids"]))
@@ -2006,7 +2206,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(first_user, channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -2032,7 +2232,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -2064,7 +2264,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
)
# must fail
- self.assertEqual(HTTPStatus.CONFLICT, channel.code, msg=channel.json_body)
+ self.assertEqual(409, channel.code, msg=channel.json_body)
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
self.assertEqual("External id is already in use.", channel.json_body["error"])
@@ -2075,7 +2275,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -2093,7 +2293,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(first_user, channel.json_body["name"])
self.assertEqual(1, len(channel.json_body["external_ids"]))
self.assertEqual(
@@ -2124,7 +2324,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
@@ -2139,7 +2339,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"deactivated": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -2158,7 +2358,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
@@ -2188,7 +2388,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"deactivated": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
@@ -2204,7 +2404,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"displayname": "Foobar"},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertEqual("Foobar", channel.json_body["displayname"])
@@ -2228,7 +2428,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
# Reactivate the user.
channel = self.make_request(
@@ -2237,7 +2437,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self._is_erased("@user:test", False)
@@ -2261,7 +2461,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
@@ -2271,7 +2471,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self._is_erased("@user:test", False)
@@ -2295,7 +2495,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
@@ -2305,7 +2505,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": False},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self._is_erased("@user:test", False)
@@ -2326,7 +2526,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"admin": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
@@ -2337,7 +2537,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
@@ -2354,7 +2554,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"user_type": UserTypes.SUPPORT},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"])
@@ -2365,7 +2565,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"])
@@ -2377,7 +2577,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"user_type": None},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertIsNone(channel.json_body["user_type"])
@@ -2388,7 +2588,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertIsNone(channel.json_body["user_type"])
@@ -2407,7 +2607,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "abc123"},
)
- self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body)
+ self.assertEqual(201, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
@@ -2418,7 +2618,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
self.assertEqual(0, channel.json_body["deactivated"])
@@ -2431,7 +2631,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
content={"password": "abc123", "deactivated": "false"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
# Check user is not deactivated
channel = self.make_request(
@@ -2440,13 +2640,111 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
# Ensure they're still alive
self.assertEqual(0, channel.json_body["deactivated"])
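+
+ # The next two tests exercise MSC3866: requiring admin approval of new accounts.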
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
+ def test_approve_account(self) -> None:
+ """Tests that approving an account correctly sets the approved flag for the user."""
+ url = self.url_prefix % "@bob:test"
+
+ # Create the user using the client-server API since otherwise the user will be
+ # marked as approved automatically.
+ channel = self.make_request(
+ "POST",
+ "register",
+ {
+ "username": "bob",
+ "password": "test",
+ "auth": {"type": LoginType.DUMMY},
+ },
+ )
+ self.assertEqual(403, channel.code, channel.result)
+ self.assertEqual(Codes.USER_AWAITING_APPROVAL, channel.json_body["errcode"])
+ self.assertEqual(
+ ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
+ )
+
+ # Get user
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertIs(False, channel.json_body["approved"])
+
+ # Approve user
+ channel = self.make_request(
+ "PUT",
+ url,
+ access_token=self.admin_user_tok,
+ content={"approved": True},
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertIs(True, channel.json_body["approved"])
+
+ # Check that the user is now approved
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertIs(True, channel.json_body["approved"])
+
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
+ def test_register_approved(self) -> None:
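+ """Tests that a user created through this admin API with approved=True is marked as approved."""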
+ url = self.url_prefix % "@bob:test"
+
+ # Create user
+ channel = self.make_request(
+ "PUT",
+ url,
+ access_token=self.admin_user_tok,
+ content={"password": "abc123", "approved": True},
+ )
+
+ self.assertEqual(201, channel.code, msg=channel.json_body)
+ self.assertEqual("@bob:test", channel.json_body["name"])
+ self.assertEqual(1, channel.json_body["approved"])
+
+ # Get user
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@bob:test", channel.json_body["name"])
+ self.assertEqual(1, channel.json_body["approved"])
+
def _is_erased(self, user_id: str, expect: bool) -> None:
"""Assert that the user is erased or not"""
d = self.store.is_user_erased(user_id)
@@ -2465,7 +2763,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"deactivated": True},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["deactivated"])
self._is_erased(user_id, False)
d = self.store.mark_user_erased(user_id)
@@ -2486,11 +2784,13 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertIn("avatar_url", content)
self.assertIn("admin", content)
self.assertIn("deactivated", content)
+ self.assertIn("erased", content)
self.assertIn("shadow_banned", content)
self.assertIn("creation_ts", content)
self.assertIn("appservice_id", content)
self.assertIn("consent_server_notice_sent", content)
self.assertIn("consent_version", content)
+ self.assertIn("consent_ts", content)
self.assertIn("external_ids", content)
# This key was removed intentionally. Ensure it is not accidentally re-included.
@@ -2498,7 +2798,6 @@ class UserRestTestCase(unittest.HomeserverTestCase):
class UserMembershipRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -2520,7 +2819,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
@@ -2535,7 +2834,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self) -> None:
@@ -2549,7 +2848,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
@@ -2565,7 +2864,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
@@ -2581,7 +2880,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
@@ -2602,7 +2901,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_rooms, channel.json_body["total"])
self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"]))
@@ -2649,13 +2948,12 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual([local_and_remote_room_id], channel.json_body["joined_rooms"])
class PushersRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -2678,7 +2976,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
@@ -2693,12 +2991,12 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/pushers"
channel = self.make_request(
@@ -2707,12 +3005,12 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/pushers"
@@ -2722,7 +3020,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only look up local users", channel.json_body["error"])
def test_get_pushers(self) -> None:
@@ -2737,7 +3035,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
# Register the pusher
@@ -2749,7 +3047,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.other_user,
access_token=token_id,
kind="http",
@@ -2769,7 +3067,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
for p in channel.json_body["pushers"]:
@@ -2784,7 +3082,6 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
class UserMediaRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -2808,7 +3105,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
"""Try to list media of an user without authentication."""
channel = self.make_request(method, self.url, {})
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "DELETE"])
@@ -2822,12 +3119,12 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "DELETE"])
def test_user_does_not_exist(self, method: str) -> None:
- """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND"""
+ """Tests that a lookup for a user that does not exist returns a 404"""
url = "/_synapse/admin/v1/users/@unknown_person:test/media"
channel = self.make_request(
method,
@@ -2835,12 +3132,12 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@parameterized.expand(["GET", "DELETE"])
def test_user_is_not_local(self, method: str) -> None:
- """Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST"""
+ """Tests that a lookup for a user that is not a local returns a 400"""
url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media"
channel = self.make_request(
@@ -2849,7 +3146,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only look up local users", channel.json_body["error"])
def test_limit_GET(self) -> None:
@@ -2865,7 +3162,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 5)
self.assertEqual(channel.json_body["next_token"], 5)
@@ -2884,7 +3181,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 5)
self.assertEqual(len(channel.json_body["deleted_media"]), 5)
@@ -2901,7 +3198,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 15)
self.assertNotIn("next_token", channel.json_body)
@@ -2920,7 +3217,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 15)
self.assertEqual(len(channel.json_body["deleted_media"]), 15)
@@ -2937,7 +3234,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(channel.json_body["next_token"], 15)
self.assertEqual(len(channel.json_body["media"]), 10)
@@ -2956,7 +3253,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], 10)
self.assertEqual(len(channel.json_body["deleted_media"]), 10)
@@ -2970,7 +3267,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
@@ -2980,7 +3277,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative limit
@@ -2990,7 +3287,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
@@ -3000,7 +3297,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_next_token(self) -> None:
@@ -3023,7 +3320,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), number_media)
self.assertNotIn("next_token", channel.json_body)
@@ -3036,7 +3333,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), number_media)
self.assertNotIn("next_token", channel.json_body)
@@ -3049,7 +3346,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 19)
self.assertEqual(channel.json_body["next_token"], 19)
@@ -3063,7 +3360,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 1)
self.assertNotIn("next_token", channel.json_body)
@@ -3080,7 +3377,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["media"]))
@@ -3095,7 +3392,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["deleted_media"]))
@@ -3112,7 +3409,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_media, channel.json_body["total"])
self.assertEqual(number_media, len(channel.json_body["media"]))
self.assertNotIn("next_token", channel.json_body)
@@ -3138,7 +3435,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_media, channel.json_body["total"])
self.assertEqual(number_media, len(channel.json_body["deleted_media"]))
self.assertCountEqual(channel.json_body["deleted_media"], media_ids)
@@ -3283,7 +3580,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
# Upload some media into the room
response = self.helper.upload_media(
- upload_resource, image_data, user_token, filename, expect_code=HTTPStatus.OK
+ upload_resource, image_data, user_token, filename, expect_code=200
)
# Extract media ID from the response
@@ -3301,10 +3598,10 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- HTTPStatus.OK,
+ 200,
channel.code,
msg=(
- f"Expected to receive a HTTPStatus.OK on accessing media: {server_and_media_id}"
+ f"Expected to receive a 200 on accessing media: {server_and_media_id}"
),
)
@@ -3350,7 +3647,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_media_list))
returned_order = [row["media_id"] for row in channel.json_body["media"]]
@@ -3386,14 +3683,14 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST", self.url, b"{}", access_token=self.admin_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
return channel.json_body["access_token"]
def test_no_auth(self) -> None:
"""Try to login as a user without authentication."""
channel = self.make_request("POST", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_not_admin(self) -> None:
@@ -3402,7 +3699,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
"POST", self.url, b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
def test_send_event(self) -> None:
"""Test that sending event as a user works."""
@@ -3427,7 +3724,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# We should only see the one device (from the login in `prepare`)
self.assertEqual(len(channel.json_body["devices"]), 1)
@@ -3439,21 +3736,21 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Logout with the puppet token
channel = self.make_request("POST", "logout", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# The puppet token should no longer work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
# .. but the real user's tokens should still work
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
def test_user_logout_all(self) -> None:
"""Tests that the target user calling `/logout/all` does *not* expire
@@ -3464,23 +3761,23 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Logout all with the real user token
channel = self.make_request(
"POST", "logout/all", b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# The puppet token should still work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# .. but the real user's tokens shouldn't
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
def test_admin_logout_all(self) -> None:
"""Tests that the admin user calling `/logout/all` does expire the
@@ -3491,23 +3788,23 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# Logout all with the admin user token
channel = self.make_request(
"POST", "logout/all", b"{}", access_token=self.admin_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
# The puppet token should no longer work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
# .. but the real user's tokens should still work
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
@unittest.override_config(
{
@@ -3538,7 +3835,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
room_id,
"com.example.test",
tok=self.other_user_tok,
- expect_code=HTTPStatus.FORBIDDEN,
+ expect_code=403,
)
# Login in as the user
@@ -3559,7 +3856,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
room_id,
user=self.other_user,
tok=self.other_user_tok,
- expect_code=HTTPStatus.FORBIDDEN,
+ expect_code=403,
)
# Logging in as the other user and joining a room should work, even
@@ -3576,7 +3873,6 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
],
)
class WhoisRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -3594,7 +3890,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
Try to get information of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self) -> None:
@@ -3609,12 +3905,12 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
self.url,
access_token=other_user2_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = self.url_prefix % "@unknown_person:unknown_domain" # type: ignore[attr-defined]
@@ -3623,7 +3919,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only whois a local user", channel.json_body["error"])
def test_get_whois_admin(self) -> None:
@@ -3635,7 +3931,7 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
@@ -3650,13 +3946,12 @@ class WhoisRestTestCase(unittest.HomeserverTestCase):
self.url,
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
class ShadowBanRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -3680,7 +3975,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
Try to get information of a user without authentication.
"""
channel = self.make_request(method, self.url)
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["POST", "DELETE"])
@@ -3691,18 +3986,18 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
other_user_token = self.login("user", "pass")
channel = self.make_request(method, self.url, access_token=other_user_token)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
@parameterized.expand(["POST", "DELETE"])
def test_user_is_not_local(self, method: str) -> None:
"""
- Tests that shadow-banning for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that shadow-banning a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain"
channel = self.make_request(method, url, access_token=self.admin_user_tok)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
def test_success(self) -> None:
"""
@@ -3715,7 +4010,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
self.assertFalse(result.shadow_banned)
channel = self.make_request("POST", self.url, access_token=self.admin_user_tok)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual({}, channel.json_body)
# Ensure the user is shadow-banned (and the cache was cleared).
@@ -3727,7 +4022,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"DELETE", self.url, access_token=self.admin_user_tok
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual({}, channel.json_body)
# Ensure the user is no longer shadow-banned (and the cache was cleared).
@@ -3737,7 +4032,6 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
class RateLimitTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -3762,7 +4056,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
"""
channel = self.make_request(method, self.url, b"{}")
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "POST", "DELETE"])
@@ -3778,13 +4072,13 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "POST", "DELETE"])
def test_user_does_not_exist(self, method: str) -> None:
"""
- Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
+ Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit"
@@ -3794,7 +4088,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@parameterized.expand(
@@ -3806,7 +4100,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
)
def test_user_is_not_local(self, method: str, error_msg: str) -> None:
"""
- Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST
+ Tests that a lookup for a user that is not local returns a 400
"""
url = (
"/_synapse/admin/v1/users/@unknown_person:unknown_domain/override_ratelimit"
@@ -3818,7 +4112,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(error_msg, channel.json_body["error"])
def test_invalid_parameter(self) -> None:
@@ -3833,7 +4127,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
content={"messages_per_second": "string"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# messages_per_second is negative
@@ -3844,7 +4138,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
content={"messages_per_second": -1},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# burst_count is a string
@@ -3855,7 +4149,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
content={"burst_count": "string"},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# burst_count is negative
@@ -3866,7 +4160,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
content={"burst_count": -1},
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_return_zero_when_null(self) -> None:
@@ -3891,7 +4185,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["messages_per_second"])
self.assertEqual(0, channel.json_body["burst_count"])
@@ -3905,7 +4199,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
@@ -3916,7 +4210,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"messages_per_second": 10, "burst_count": 11},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(10, channel.json_body["messages_per_second"])
self.assertEqual(11, channel.json_body["burst_count"])
@@ -3927,7 +4221,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
content={"messages_per_second": 20, "burst_count": 21},
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(20, channel.json_body["messages_per_second"])
self.assertEqual(21, channel.json_body["burst_count"])
@@ -3937,7 +4231,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(20, channel.json_body["messages_per_second"])
self.assertEqual(21, channel.json_body["burst_count"])
@@ -3947,7 +4241,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
@@ -3957,13 +4251,12 @@ class RateLimitTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
class AccountDataTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -3982,7 +4275,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
"""Try to get information of a user without authentication."""
channel = self.make_request("GET", self.url, {})
- self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
+ self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
@@ -3995,7 +4288,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
access_token=other_user_token,
)
- self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
+ self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self) -> None:
@@ -4008,7 +4301,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
+ self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
@@ -4021,7 +4314,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only look up local users", channel.json_body["error"])
def test_success(self) -> None:
@@ -4042,7 +4335,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
self.url,
access_token=self.admin_user_tok,
)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(
{"a": 1}, channel.json_body["account_data"]["global"]["m.global"]
)
@@ -4050,3 +4343,183 @@ class AccountDataTestCase(unittest.HomeserverTestCase):
{"b": 2},
channel.json_body["account_data"]["rooms"]["test_room"]["m.per_room"],
)
+
+
+class UsersByExternalIdTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.other_user = self.register_user("user", "pass")
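+ # Bind two external IDs to the target user: a simple one and one with characters that need URL-encoding.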
+ self.get_success(
+ self.store.record_user_external_id(
+ "the-auth-provider", "the-external-id", self.other_user
+ )
+ )
+ self.get_success(
+ self.store.record_user_external_id(
+ "another-auth-provider", "a:complex@external/id", self.other_user
+ )
+ )
+
+ def test_no_auth(self) -> None:
+ """Try to lookup a user without authentication."""
+ url = (
+ "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
+ )
+
+ channel = self.make_request(
+ "GET",
+ url,
+ )
+
+ self.assertEqual(401, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ def test_binding_does_not_exist(self) -> None:
+ """Tests that a lookup for an external ID that does not exist returns a 404"""
+ url = "/_synapse/admin/v1/auth_providers/the-auth-provider/users/unknown-id"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(404, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+ def test_success(self) -> None:
+ """Tests a successful external ID lookup"""
+ url = (
+ "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id"
+ )
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {"user_id": self.other_user},
+ channel.json_body,
+ )
+
+ def test_success_urlencoded(self) -> None:
+ """Tests a successful external ID lookup with an url-encoded ID"""
+ url = "/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {"user_id": self.other_user},
+ channel.json_body,
+ )
+
+
+class UsersByThreePidTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.other_user = self.register_user("user", "pass")
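+ # Bind an email address and a phone number to the user; the "msidn" medium
+ # string matches the lookup URL used in test_success below.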
+ self.get_success(
+ self.store.user_add_threepid(
+ self.other_user, "email", "user@email.com", 1, 1
+ )
+ )
+ self.get_success(
+ self.store.user_add_threepid(self.other_user, "msidn", "+1-12345678", 1, 1)
+ )
+
+ def test_no_auth(self) -> None:
+ """Try to look up a user without authentication."""
+ url = "/_synapse/admin/v1/threepid/email/users/user%40email.com"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ )
+
+ self.assertEqual(401, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ def test_medium_does_not_exist(self) -> None:
+ """Tests that both a lookup for a medium that does not exist and a user that
+ doesn't exist with that third party ID returns a 404"""
+ # test for unknown medium
+ url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(404, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+ # test for unknown user with a known medium
+ url = "/_synapse/admin/v1/threepid/email/users/unknown"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(404, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+ def test_success(self) -> None:
+ """Tests a successful medium + address lookup"""
+ # test for email medium with encoded value of user@email.com
+ url = "/_synapse/admin/v1/threepid/email/users/user%40email.com"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {"user_id": self.other_user},
+ channel.json_body,
+ )
+
+ # test for msidn medium with encoded value of +1-12345678
+ url = "/_synapse/admin/v1/threepid/msidn/users/%2B1-12345678"
+
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(
+ {"user_id": self.other_user},
+ channel.json_body,
+ )
diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py
index b21f6d4689..30f12f1bff 100644
--- a/tests/rest/admin/test_username_available.py
+++ b/tests/rest/admin/test_username_available.py
@@ -11,9 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from http import HTTPStatus
-
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
@@ -40,7 +37,7 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase):
if username == "allowed":
return True
raise SynapseError(
- HTTPStatus.BAD_REQUEST,
+ 400,
"User ID already taken.",
errcode=Codes.USER_IN_USE,
)
@@ -50,27 +47,23 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase):
def test_username_available(self) -> None:
"""
- The endpoint should return a HTTPStatus.OK response if the username does not exist
+ The endpoint should return a 200 response if the username does not exist
"""
url = "%s?username=%s" % (self.url, "allowed")
channel = self.make_request("GET", url, access_token=self.admin_user_tok)
- self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+ self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["available"])
def test_username_unavailable(self) -> None:
"""
- The endpoint should return a HTTPStatus.OK response if the username does not exist
+ The endpoint should return a 400 response if the username is already taken
"""
url = "%s?username=%s" % (self.url, "disallowed")
channel = self.make_request("GET", url, access_token=self.admin_user_tok)
- self.assertEqual(
- HTTPStatus.BAD_REQUEST,
- channel.code,
- msg=channel.json_body,
- )
+ self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["errcode"], "M_USER_IN_USE")
self.assertEqual(channel.json_body["error"], "User ID already taken.")
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 7ae926dc9c..c1a7fb2f8a 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -488,7 +488,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST", "account/deactivate", request_data, access_token=tok
)
- self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.code, 200, channel.json_body)
class WhoamiTestCase(unittest.HomeserverTestCase):
@@ -641,21 +641,21 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
def test_add_email_no_at(self) -> None:
self._request_token_invalid_email(
"address-without-at.bar",
- expected_errcode=Codes.UNKNOWN,
+ expected_errcode=Codes.BAD_JSON,
expected_error="Unable to parse email address",
)
def test_add_email_two_at(self) -> None:
self._request_token_invalid_email(
"foo@foo@test.bar",
- expected_errcode=Codes.UNKNOWN,
+ expected_errcode=Codes.BAD_JSON,
expected_error="Unable to parse email address",
)
def test_add_email_bad_format(self) -> None:
self._request_token_invalid_email(
"user@bad.example.net@good.example.com",
- expected_errcode=Codes.UNKNOWN,
+ expected_errcode=Codes.BAD_JSON,
expected_error="Unable to parse email address",
)
@@ -1001,7 +1001,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"]
)
self.assertEqual(expected_errcode, channel.json_body["errcode"])
- self.assertEqual(expected_error, channel.json_body["error"])
+ self.assertIn(expected_error, channel.json_body["error"])
def _validate_token(self, link: str) -> None:
# Remove the host
diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py
index 05355c7fb6..208ec44829 100644
--- a/tests/rest/client/test_auth.py
+++ b/tests/rest/client/test_auth.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Union
@@ -20,7 +21,8 @@ from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
-from synapse.api.constants import LoginType
+from synapse.api.constants import ApprovalNoticeMedium, LoginType
+from synapse.api.errors import Codes, SynapseError
from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker
from synapse.rest.client import account, auth, devices, login, logout, register
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -31,8 +33,8 @@ from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
-from tests.rest.client.utils import TEST_OIDC_CONFIG
-from tests.server import FakeChannel
+from tests.rest.client.utils import TEST_OIDC_CONFIG, TEST_OIDC_ISSUER
+from tests.server import FakeChannel, make_request
from tests.unittest import override_config, skip_unless
@@ -464,9 +466,11 @@ class UIAuthTests(unittest.HomeserverTestCase):
* checking that the original operation succeeds
"""
+ fake_oidc_server = self.helper.fake_oidc_server()
+
# log the user in
remote_user_id = UserID.from_string(self.user).localpart
- login_resp = self.helper.login_via_oidc(remote_user_id)
+ login_resp, _ = self.helper.login_via_oidc(fake_oidc_server, remote_user_id)
self.assertEqual(login_resp["user_id"], self.user)
# initiate a UI Auth process by attempting to delete the device
@@ -480,8 +484,8 @@ class UIAuthTests(unittest.HomeserverTestCase):
# run the UIA-via-SSO flow
session_id = channel.json_body["session"]
- channel = self.helper.auth_via_oidc(
- {"sub": remote_user_id}, ui_auth_session_id=session_id
+ channel, _ = self.helper.auth_via_oidc(
+ fake_oidc_server, {"sub": remote_user_id}, ui_auth_session_id=session_id
)
# that should serve a confirmation page
@@ -498,7 +502,8 @@ class UIAuthTests(unittest.HomeserverTestCase):
@skip_unless(HAS_OIDC, "requires OIDC")
@override_config({"oidc_config": TEST_OIDC_CONFIG})
def test_does_not_offer_password_for_sso_user(self) -> None:
- login_resp = self.helper.login_via_oidc("username")
+ fake_oidc_server = self.helper.fake_oidc_server()
+ login_resp, _ = self.helper.login_via_oidc(fake_oidc_server, "username")
user_tok = login_resp["access_token"]
device_id = login_resp["device_id"]
@@ -521,7 +526,10 @@ class UIAuthTests(unittest.HomeserverTestCase):
@override_config({"oidc_config": TEST_OIDC_CONFIG})
def test_offers_both_flows_for_upgraded_user(self) -> None:
"""A user that had a password and then logged in with SSO should get both flows"""
- login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart)
+ fake_oidc_server = self.helper.fake_oidc_server()
+ login_resp, _ = self.helper.login_via_oidc(
+ fake_oidc_server, UserID.from_string(self.user).localpart
+ )
self.assertEqual(login_resp["user_id"], self.user)
channel = self.delete_device(
@@ -538,8 +546,13 @@ class UIAuthTests(unittest.HomeserverTestCase):
@override_config({"oidc_config": TEST_OIDC_CONFIG})
def test_ui_auth_fails_for_incorrect_sso_user(self) -> None:
"""If the user tries to authenticate with the wrong SSO user, they get an error"""
+
+ fake_oidc_server = self.helper.fake_oidc_server()
+
# log the user in
- login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart)
+ login_resp, _ = self.helper.login_via_oidc(
+ fake_oidc_server, UserID.from_string(self.user).localpart
+ )
self.assertEqual(login_resp["user_id"], self.user)
# start a UI Auth flow by attempting to delete a device
@@ -552,8 +565,8 @@ class UIAuthTests(unittest.HomeserverTestCase):
session_id = channel.json_body["session"]
# do the OIDC auth, but auth as the wrong user
- channel = self.helper.auth_via_oidc(
- {"sub": "wrong_user"}, ui_auth_session_id=session_id
+ channel, _ = self.helper.auth_via_oidc(
+ fake_oidc_server, {"sub": "wrong_user"}, ui_auth_session_id=session_id
)
# that should return a failure message
@@ -567,6 +580,39 @@ class UIAuthTests(unittest.HomeserverTestCase):
body={"auth": {"session": session_id}},
)
+ @skip_unless(HAS_OIDC, "requires OIDC")
+ @override_config(
+ {
+ "oidc_config": TEST_OIDC_CONFIG,
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ },
+ }
+ )
+ def test_sso_not_approved(self) -> None:
+ """Tests that if we register a user via SSO while requiring approval for new
+ accounts, we still raise the correct error before logging the user in.
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ login_resp, _ = self.helper.login_via_oidc(
+ fake_oidc_server, "username", expected_status=403
+ )
+
+ self.assertEqual(login_resp["errcode"], Codes.USER_AWAITING_APPROVAL)
+ self.assertEqual(
+ ApprovalNoticeMedium.NONE, login_resp["approval_notice_medium"]
+ )
+
+ # Check that we didn't register a device for the user during the login attempt.
+ devices = self.get_success(
+ self.hs.get_datastores().main.get_devices_by_user("@username:test")
+ )
+
+ self.assertEqual(len(devices), 0)
+
class RefreshAuthTests(unittest.HomeserverTestCase):
servlets = [
@@ -589,23 +635,10 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
"""
return self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": refresh_token},
)
- def is_access_token_valid(self, access_token: str) -> bool:
- """
- Checks whether an access token is valid, returning whether it is or not.
- """
- code = self.make_request(
- "GET", "/_matrix/client/v3/account/whoami", access_token=access_token
- ).code
-
- # Either 200 or 401 is what we get back; anything else is a bug.
- assert code in {HTTPStatus.OK, HTTPStatus.UNAUTHORIZED}
-
- return code == HTTPStatus.OK
-
def test_login_issue_refresh_token(self) -> None:
"""
A login response should include a refresh_token only if asked.
@@ -691,7 +724,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": login_response.json_body["refresh_token"]},
)
self.assertEqual(refresh_response.code, HTTPStatus.OK, refresh_response.result)
@@ -732,7 +765,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": login_response.json_body["refresh_token"]},
)
self.assertEqual(refresh_response.code, HTTPStatus.OK, refresh_response.result)
@@ -802,29 +835,37 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
self.reactor.advance(59.0)
# Both tokens should still be valid.
- self.assertTrue(self.is_access_token_valid(refreshable_access_token))
- self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token))
+ self.helper.whoami(refreshable_access_token, expect_code=HTTPStatus.OK)
+ self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK)
# Advance to 61 s (just past 1 minute, the time of expiry)
self.reactor.advance(2.0)
# Only the non-refreshable token is still valid.
- self.assertFalse(self.is_access_token_valid(refreshable_access_token))
- self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token))
+ self.helper.whoami(
+ refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED
+ )
+ self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK)
# Advance to 599 s (just shy of 10 minutes, the time of expiry)
self.reactor.advance(599.0 - 61.0)
# It's still the case that only the non-refreshable token is still valid.
- self.assertFalse(self.is_access_token_valid(refreshable_access_token))
- self.assertTrue(self.is_access_token_valid(nonrefreshable_access_token))
+ self.helper.whoami(
+ refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED
+ )
+ self.helper.whoami(nonrefreshable_access_token, expect_code=HTTPStatus.OK)
# Advance to 601 s (just past 10 minutes, the time of expiry)
self.reactor.advance(2.0)
# Now neither token is valid.
- self.assertFalse(self.is_access_token_valid(refreshable_access_token))
- self.assertFalse(self.is_access_token_valid(nonrefreshable_access_token))
+ self.helper.whoami(
+ refreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED
+ )
+ self.helper.whoami(
+ nonrefreshable_access_token, expect_code=HTTPStatus.UNAUTHORIZED
+ )
@override_config(
{"refreshable_access_token_lifetime": "1m", "refresh_token_lifetime": "2m"}
@@ -961,7 +1002,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# This first refresh should work properly
first_refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": login_response.json_body["refresh_token"]},
)
self.assertEqual(
@@ -971,7 +1012,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# This one as well, since the token in the first one was never used
second_refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": login_response.json_body["refresh_token"]},
)
self.assertEqual(
@@ -981,7 +1022,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# This one should not, since the token from the first refresh is not valid anymore
third_refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": first_refresh_response.json_body["refresh_token"]},
)
self.assertEqual(
@@ -1015,7 +1056,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# Now that the access token from the last valid refresh was used once, refreshing with the N-1 token should fail
fourth_refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": login_response.json_body["refresh_token"]},
)
self.assertEqual(
@@ -1027,7 +1068,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# But refreshing from the last valid refresh token still works
fifth_refresh_response = self.make_request(
"POST",
- "/_matrix/client/v1/refresh",
+ "/_matrix/client/v3/refresh",
{"refresh_token": second_refresh_response.json_body["refresh_token"]},
)
self.assertEqual(
@@ -1120,3 +1161,349 @@ class RefreshAuthTests(unittest.HomeserverTestCase):
# and no refresh token
self.assertEqual(_table_length("access_tokens"), 0)
self.assertEqual(_table_length("refresh_tokens"), 0)
+
+
+def oidc_config(
+ id: str, with_localpart_template: bool, **kwargs: Any
+) -> Dict[str, Any]:
+ """Sample OIDC provider config used in backchannel logout tests.
+
+ Args:
+ id: IDP ID for this provider
+ with_localpart_template: Set to `True` to have a default localpart_template in
+ the `user_mapping_provider` config and skip the user mapping session
+ **kwargs: rest of the config
+
+ Returns:
+ A dict suitable for the `oidc_config` or the `oidc_providers[]` parts of
+ the HS config
+ """
+ config: Dict[str, Any] = {
+ "idp_id": id,
+ "idp_name": id,
+ "issuer": TEST_OIDC_ISSUER,
+ "client_id": "test-client-id",
+ "client_secret": "test-client-secret",
+ "scopes": ["openid"],
+ }
+
+ if with_localpart_template:
+ config["user_mapping_provider"] = {
+ "config": {"localpart_template": "{{ user.sub }}"}
+ }
+ else:
+ config["user_mapping_provider"] = {"config": {}}
+
+ config.update(kwargs)
+
+ return config
+
+
+@skip_unless(HAS_OIDC, "Requires OIDC")
+class OidcBackchannelLogoutTests(unittest.HomeserverTestCase):
+ servlets = [
+ account.register_servlets,
+ login.register_servlets,
+ ]
+
+ def default_config(self) -> Dict[str, Any]:
+ config = super().default_config()
+
+ # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns
+ # False, so Synapse sees the requested URI as http://...; using http in the
+ # public_baseurl stops Synapse from trying to redirect to https.
+ config["public_baseurl"] = "http://synapse.test"
+
+ return config
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ resource_dict = super().create_resource_dict()
+ resource_dict.update(build_synapse_client_resource_tree(self.hs))
+ return resource_dict
+
+ def submit_logout_token(self, logout_token: str) -> FakeChannel:
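+ # Backchannel logout tokens are delivered as a form-encoded POST body.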
+ return self.make_request(
+ "POST",
+ "/_synapse/client/oidc/backchannel_logout",
+ content=f"logout_token={logout_token}",
+ content_is_form=True,
+ )
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ id="oidc",
+ with_localpart_template=True,
+ backchannel_logout_enabled=True,
+ )
+ ]
+ }
+ )
+ def test_simple_logout(self) -> None:
+ """
+ Receiving a logout token should log out the user
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ user = "john"
+
+ login_resp, first_grant = self.helper.login_via_oidc(
+ fake_oidc_server, user, with_sid=True
+ )
+ first_access_token: str = login_resp["access_token"]
+ self.helper.whoami(first_access_token, expect_code=HTTPStatus.OK)
+
+ login_resp, second_grant = self.helper.login_via_oidc(
+ fake_oidc_server, user, with_sid=True
+ )
+ second_access_token: str = login_resp["access_token"]
+ self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK)
+
+ self.assertNotEqual(first_grant.sid, second_grant.sid)
+ self.assertEqual(first_grant.userinfo["sub"], second_grant.userinfo["sub"])
+
+ # Logging out of the first session
+ logout_token = fake_oidc_server.generate_logout_token(first_grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ self.helper.whoami(first_access_token, expect_code=HTTPStatus.UNAUTHORIZED)
+ self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK)
+
+ # Logging out of the second session
+ logout_token = fake_oidc_server.generate_logout_token(second_grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ id="oidc",
+ with_localpart_template=True,
+ backchannel_logout_enabled=True,
+ )
+ ]
+ }
+ )
+ def test_logout_during_login(self) -> None:
+ """
+ It should revoke login tokens when receiving a logout token
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ user = "john"
+
+ # Complete an OIDC authentication to obtain a login token, then submit a
+ # logout token before the login token is exchanged
+ client_redirect_url = "https://x"
+ userinfo = {"sub": user}
+ channel, grant = self.helper.auth_via_oidc(
+ fake_oidc_server,
+ userinfo,
+ client_redirect_url,
+ with_sid=True,
+ )
+
+ # expect a confirmation page
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+ # fish the matrix login token out of the body of the confirmation page
+ m = re.search(
+ 'a href="%s.*loginToken=([^"]*)"' % (client_redirect_url,),
+ channel.text_body,
+ )
+ assert m, channel.text_body
+ login_token = m.group(1)
+
+ # Submit a logout
+ logout_token = fake_oidc_server.generate_logout_token(grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ # Now try to exchange the login token
+ channel = make_request(
+ self.hs.get_reactor(),
+ self.site,
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ # It should have failed
+ self.assertEqual(channel.code, 403)
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ id="oidc",
+ with_localpart_template=False,
+ backchannel_logout_enabled=True,
+ )
+ ]
+ }
+ )
+ def test_logout_during_mapping(self) -> None:
+ """
+ It should stop an ongoing user mapping session when receiving a logout token
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ user = "john"
+
+ # Start an authentication that requires a user mapping session; the logout
+ # token is submitted before the mapping completes
+ client_redirect_url = "https://x"
+ userinfo = {"sub": user}
+ channel, grant = self.helper.auth_via_oidc(
+ fake_oidc_server,
+ userinfo,
+ client_redirect_url,
+ with_sid=True,
+ )
+
+ # Expect a user mapping page
+ self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+
+ # We should have a user_mapping_session cookie
+ cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
+ assert cookie_headers
+ cookies: Dict[str, str] = {}
+ for h in cookie_headers:
+ key, value = h.split(";")[0].split("=", maxsplit=1)
+ cookies[key] = value
+
+ user_mapping_session_id = cookies["username_mapping_session"]
+
+ # Getting that session should not raise
+ session = self.hs.get_sso_handler().get_mapping_session(user_mapping_session_id)
+ self.assertIsNotNone(session)
+
+ # Submit a logout
+ logout_token = fake_oidc_server.generate_logout_token(grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ # Now it should raise
+ with self.assertRaises(SynapseError):
+ self.hs.get_sso_handler().get_mapping_session(user_mapping_session_id)
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ id="oidc",
+ with_localpart_template=True,
+ backchannel_logout_enabled=False,
+ )
+ ]
+ }
+ )
+ def test_disabled(self) -> None:
+ """
+ Receiving a logout token should do nothing if backchannel logout is disabled in the config
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ user = "john"
+
+ login_resp, grant = self.helper.login_via_oidc(
+ fake_oidc_server, user, with_sid=True
+ )
+ access_token: str = login_resp["access_token"]
+ self.helper.whoami(access_token, expect_code=HTTPStatus.OK)
+
+ # Logging out shouldn't work
+ logout_token = fake_oidc_server.generate_logout_token(grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 400)
+
+ # And the token should still be valid
+ self.helper.whoami(access_token, expect_code=HTTPStatus.OK)
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ id="oidc",
+ with_localpart_template=True,
+ backchannel_logout_enabled=True,
+ )
+ ]
+ }
+ )
+ def test_no_sid(self) -> None:
+ """
+ Receiving a logout token for a login that had no `sid` should do nothing
+ """
+ fake_oidc_server = self.helper.fake_oidc_server()
+ user = "john"
+
+ login_resp, grant = self.helper.login_via_oidc(
+ fake_oidc_server, user, with_sid=False
+ )
+ access_token: str = login_resp["access_token"]
+ self.helper.whoami(access_token, expect_code=HTTPStatus.OK)
+
+ # Logging out shouldn't work
+ logout_token = fake_oidc_server.generate_logout_token(grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 400)
+
+ # And the token should still be valid
+ self.helper.whoami(access_token, expect_code=HTTPStatus.OK)
+
+ @override_config(
+ {
+ "oidc_providers": [
+ oidc_config(
+ "first",
+ issuer="https://first-issuer.com/",
+ with_localpart_template=True,
+ backchannel_logout_enabled=True,
+ ),
+ oidc_config(
+ "second",
+ issuer="https://second-issuer.com/",
+ with_localpart_template=True,
+ backchannel_logout_enabled=True,
+ ),
+ ]
+ }
+ )
+ def test_multiple_providers(self) -> None:
+ """
+ It should be able to distinguish logout tokens from two different IdPs
+ """
+ first_server = self.helper.fake_oidc_server(issuer="https://first-issuer.com/")
+ second_server = self.helper.fake_oidc_server(
+ issuer="https://second-issuer.com/"
+ )
+ user = "john"
+
+ login_resp, first_grant = self.helper.login_via_oidc(
+ first_server, user, with_sid=True, idp_id="oidc-first"
+ )
+ first_access_token: str = login_resp["access_token"]
+ self.helper.whoami(first_access_token, expect_code=HTTPStatus.OK)
+
+ login_resp, second_grant = self.helper.login_via_oidc(
+ second_server, user, with_sid=True, idp_id="oidc-second"
+ )
+ second_access_token: str = login_resp["access_token"]
+ self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK)
+
+ # `sid` in the fake providers are generated by a counter, so the first grant of
+ # each provider should give the same SID
+ self.assertEqual(first_grant.sid, second_grant.sid)
+ self.assertEqual(first_grant.userinfo["sub"], second_grant.userinfo["sub"])
+
+ # Logging out of the first session
+ logout_token = first_server.generate_logout_token(first_grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ self.helper.whoami(first_access_token, expect_code=HTTPStatus.UNAUTHORIZED)
+ self.helper.whoami(second_access_token, expect_code=HTTPStatus.OK)
+
+ # Logging out of the second session
+ logout_token = second_server.generate_logout_token(second_grant)
+ channel = self.submit_logout_token(logout_token)
+ self.assertEqual(channel.code, 200)
+
+ self.helper.whoami(second_access_token, expect_code=HTTPStatus.UNAUTHORIZED)
diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py
index aa98222434..d80eea17d3 100644
--- a/tests/rest/client/test_devices.py
+++ b/tests/rest/client/test_devices.py
@@ -200,3 +200,37 @@ class DevicesTestCase(unittest.HomeserverTestCase):
self.reactor.advance(43200)
self.get_success(self.handler.get_device(user_id, "abc"))
self.get_failure(self.handler.get_device(user_id, "def"), NotFoundError)
+
+
+class DehydratedDeviceTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ register.register_servlets,
+ devices.register_servlets,
+ ]
+
+ def test_PUT(self) -> None:
+ """Sanity-check that we can PUT a dehydrated device.
+
+ Detects https://github.com/matrix-org/synapse/issues/14334.
+ """
+ alice = self.register_user("alice", "correcthorse")
+ token = self.login(alice, "correcthorse")
+
+ # Have alice upload a dehydrated device
+ channel = self.make_request(
+ "PUT",
+ "_matrix/client/unstable/org.matrix.msc2697.v2/dehydrated_device",
+ {
+ "device_data": {
+ "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
+ "account": "dehydrated_device",
+ }
+ },
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+ device_id = channel.json_body.get("device_id")
+ self.assertIsInstance(device_id, str)
diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py
index 823e8ab8c4..afc8d641be 100644
--- a/tests/rest/client/test_filter.py
+++ b/tests/rest/client/test_filter.py
@@ -43,7 +43,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
self.EXAMPLE_FILTER_JSON,
)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body, {"filter_id": "0"})
filter = self.get_success(
self.store.get_user_filter(user_localpart="apple", filter_id=0)
@@ -58,7 +58,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
self.EXAMPLE_FILTER_JSON,
)
- self.assertEqual(channel.result["code"], b"403")
+ self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN)
def test_add_filter_non_local_user(self) -> None:
@@ -71,7 +71,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
)
self.hs.is_mine = _is_mine
- self.assertEqual(channel.result["code"], b"403")
+ self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN)
def test_get_filter(self) -> None:
@@ -85,7 +85,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
"GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id)
)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body, self.EXAMPLE_FILTER)
def test_get_filter_non_existant(self) -> None:
@@ -93,7 +93,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
"GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id)
)
- self.assertEqual(channel.result["code"], b"404")
+ self.assertEqual(channel.code, 404)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
# Currently invalid params do not have an appropriate errcode
@@ -103,7 +103,7 @@ class FilterTestCase(unittest.HomeserverTestCase):
"GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id)
)
- self.assertEqual(channel.result["code"], b"400")
+ self.assertEqual(channel.code, 400)
# No ID also returns an invalid_id error
def test_get_filter_no_id(self) -> None:
@@ -111,4 +111,4 @@ class FilterTestCase(unittest.HomeserverTestCase):
"GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id)
)
- self.assertEqual(channel.result["code"], b"400")
+ self.assertEqual(channel.code, 400)
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index dc17c9d113..b0c8215744 100644
--- a/tests/rest/client/test_identity.py
+++ b/tests/rest/client/test_identity.py
@@ -25,7 +25,6 @@ from tests import unittest
class IdentityTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -33,7 +32,6 @@ class IdentityTestCase(unittest.HomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["enable_3pid_lookup"] = False
self.hs = self.setup_test_homeserver(config=config)
@@ -54,6 +52,7 @@ class IdentityTestCase(unittest.HomeserverTestCase):
"id_server": "testis",
"medium": "email",
"address": "test@example.com",
+ "id_access_token": tok,
}
request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
channel = self.make_request(
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index bbc8e74243..741fecea77 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -19,6 +19,7 @@ from synapse.rest import admin
from synapse.rest.client import keys, login
from tests import unittest
+from tests.http.server._base import make_request_with_cancellation_test
class KeyQueryTestCase(unittest.HomeserverTestCase):
@@ -89,3 +90,31 @@ class KeyQueryTestCase(unittest.HomeserverTestCase):
Codes.BAD_JSON,
channel.result,
)
+
+ def test_key_query_cancellation(self) -> None:
+ """
+ Tests that /keys/query is cancellable and does not swallow the
+ CancelledError.
+ """
+ self.register_user("alice", "wonderland")
+ alice_token = self.login("alice", "wonderland")
+
+ bob = self.register_user("bob", "uncle")
+
+ channel = make_request_with_cancellation_test(
+ "test_key_query_cancellation",
+ self.reactor,
+ self.site,
+ "POST",
+ "/_matrix/client/r0/keys/query",
+ {
+ "device_keys": {
+ # Empty list means we request keys for all bob's devices
+ bob: [],
+ },
+ },
+ token=alice_token,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.result["body"])
+ self.assertIn(bob, channel.json_body["device_keys"])
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index a2958f6959..ff5baa9f0a 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -13,7 +13,6 @@
# limitations under the License.
import time
import urllib.parse
-from http import HTTPStatus
from typing import Any, Dict, List, Optional
from unittest.mock import Mock
from urllib.parse import urlencode
@@ -24,6 +23,8 @@ from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
+from synapse.api.constants import ApprovalNoticeMedium, LoginType
+from synapse.api.errors import Codes
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
@@ -35,7 +36,7 @@ from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
-from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
+from tests.rest.client.utils import TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
@@ -95,6 +96,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
logout.register_servlets,
devices.register_servlets,
lambda hs, http_server: WhoamiRestServlet(hs).register(http_server),
+ register.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
@@ -134,10 +136,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
@@ -152,7 +154,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
@override_config(
{
@@ -179,10 +181,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
@@ -197,7 +199,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
@override_config(
{
@@ -224,10 +226,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", LOGIN_URL, params)
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
# Since we're ratelimiting at 1 request/min, retry_after_ms should be lower
# than 1min.
@@ -242,7 +244,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
@override_config({"session_lifetime": "24h"})
def test_soft_logout(self) -> None:
@@ -250,7 +252,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# we shouldn't be able to make requests without an access token
channel = self.make_request(b"GET", TEST_URL)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN")
# log in as normal
@@ -261,20 +263,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
access_token = channel.json_body["access_token"]
device_id = channel.json_body["device_id"]
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
@@ -288,7 +290,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# more requests with the expired token should still return a soft-logout
self.reactor.advance(3600)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
@@ -296,7 +298,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
self._delete_device(access_token_2, "kermit", "monkey", device_id)
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], False)
@@ -307,7 +309,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
b"DELETE", "devices/" + device_id, access_token=access_token
)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
# check it's a UI-Auth fail
self.assertEqual(
set(channel.json_body.keys()),
@@ -330,7 +332,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
access_token=access_token,
content={"auth": auth},
)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None:
@@ -341,20 +343,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard logout this session
channel = self.make_request(b"POST", "/logout", access_token=access_token)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
@override_config({"session_lifetime": "24h"})
def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out(
@@ -367,20 +369,20 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
# we should now be able to make requests with the access token
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
# time passes
self.reactor.advance(24 * 3600)
# ... and we should be soft-logouted
channel = self.make_request(b"GET", TEST_URL, access_token=access_token)
- self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN")
self.assertEqual(channel.json_body["soft_logout"], True)
# Now try to hard log out all of the user's sessions
channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
def test_login_with_overly_long_device_id_fails(self) -> None:
self.register_user("mickey", "cheese")
@@ -407,6 +409,44 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
+ def test_require_approval(self) -> None:
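+ # With MSC3866 approval required, both the registration request and a
+ # subsequent password login are rejected with USER_AWAITING_APPROVAL.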
+ channel = self.make_request(
+ "POST",
+ "register",
+ {
+ "username": "kermit",
+ "password": "monkey",
+ "auth": {"type": LoginType.DUMMY},
+ },
+ )
+ self.assertEqual(403, channel.code, channel.result)
+ self.assertEqual(Codes.USER_AWAITING_APPROVAL, channel.json_body["errcode"])
+ self.assertEqual(
+ ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
+ )
+
+ params = {
+ "type": LoginType.PASSWORD,
+ "identifier": {"type": "m.id.user", "user": "kermit"},
+ "password": "monkey",
+ }
+ channel = self.make_request("POST", LOGIN_URL, params)
+ self.assertEqual(403, channel.code, channel.result)
+ self.assertEqual(Codes.USER_AWAITING_APPROVAL, channel.json_body["errcode"])
+ self.assertEqual(
+ ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
+ )
+
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
@@ -466,7 +506,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
def test_get_login_flows(self) -> None:
"""GET /login should return password and SSO flows"""
channel = self.make_request("GET", "/_matrix/client/r0/login")
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
expected_flow_types = [
"m.login.cas",
@@ -494,14 +534,14 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
channel = self._make_sso_redirect_request(None)
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
uri = location_headers[0]
# hitting that picker should give us some HTML
channel = self.make_request("GET", uri)
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
# parse the form to check it has fields assumed elsewhere in this class
html = channel.result["body"].decode("utf-8")
@@ -530,7 +570,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
+ "&idp=cas",
shorthand=False,
)
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
cas_uri = location_headers[0]
@@ -555,7 +595,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
+ urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ "&idp=saml",
)
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
saml_uri = location_headers[0]
@@ -572,21 +612,24 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
def test_login_via_oidc(self) -> None:
"""If OIDC is chosen, should redirect to the OIDC auth endpoint"""
- # pick the default OIDC provider
- channel = self.make_request(
- "GET",
- "/_synapse/client/pick_idp?redirectUrl="
- + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
- + "&idp=oidc",
- )
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ fake_oidc_server = self.helper.fake_oidc_server()
+
+ with fake_oidc_server.patch_homeserver(hs=self.hs):
+ # pick the default OIDC provider
+ channel = self.make_request(
+ "GET",
+ "/_synapse/client/pick_idp?redirectUrl="
+ + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
+ + "&idp=oidc",
+ )
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
- self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
+ self.assertEqual(oidc_uri_path, fake_oidc_server.authorization_endpoint)
# ... and should have set a cookie including the redirect url
cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
@@ -603,10 +646,12 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
TEST_CLIENT_REDIRECT_URL,
)
- channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"})
+ channel, _ = self.helper.complete_oidc_auth(
+ fake_oidc_server, oidc_uri, cookies, {"sub": "user1"}
+ )
# that should serve a confirmation page
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
content_type_headers = channel.headers.getRawHeaders("Content-Type")
assert content_type_headers
self.assertTrue(content_type_headers[-1].startswith("text/html"))
@@ -634,7 +679,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
"/login",
content={"type": "m.login.token", "token": login_token},
)
- self.assertEqual(chan.code, HTTPStatus.OK, chan.result)
+ self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@user1:test")
def test_multi_sso_redirect_to_unknown(self) -> None:
@@ -643,25 +688,28 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
"GET",
"/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz",
)
- self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
+ self.assertEqual(channel.code, 400, channel.result)
def test_client_idp_redirect_to_unknown(self) -> None:
"""If the client tries to pick an unknown IdP, return a 404"""
channel = self._make_sso_redirect_request("xxx")
- self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result)
+ self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
def test_client_idp_redirect_to_oidc(self) -> None:
"""If the client pick a known IdP, redirect to it"""
- channel = self._make_sso_redirect_request("oidc")
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ fake_oidc_server = self.helper.fake_oidc_server()
+
+ with fake_oidc_server.patch_homeserver(hs=self.hs):
+ channel = self._make_sso_redirect_request("oidc")
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
oidc_uri = location_headers[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
# it should redirect us to the auth page of the OIDC server
- self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
+ self.assertEqual(oidc_uri_path, fake_oidc_server.authorization_endpoint)
def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel:
"""Send a request to /_matrix/client/r0/login/sso/redirect
@@ -765,7 +813,7 @@ class CASTestCase(unittest.HomeserverTestCase):
channel = self.make_request("GET", cas_ticket_url)
# Test that the response is HTML.
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
content_type_header_value = ""
for header in channel.result.get("headers", []):
if header[0] == b"Content-Type":
@@ -878,17 +926,17 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_jwt_valid_registered(self) -> None:
self.register_user("kermit", "monkey")
channel = self.jwt_login({"sub": "kermit"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_valid_unregistered(self) -> None:
channel = self.jwt_login({"sub": "frog"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, "notsecret")
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -897,7 +945,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_jwt_expired(self) -> None:
channel = self.jwt_login({"sub": "frog", "exp": 864000})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -907,7 +955,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_jwt_not_before(self) -> None:
now = int(time.time())
channel = self.jwt_login({"sub": "frog", "nbf": now + 3600})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -916,7 +964,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_no_sub(self) -> None:
channel = self.jwt_login({"username": "root"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Invalid JWT")
@@ -925,12 +973,12 @@ class JWTTestCase(unittest.HomeserverTestCase):
"""Test validating the issuer claim."""
# A valid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid issuer.
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -939,7 +987,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
# Not providing an issuer.
channel = self.jwt_login({"sub": "kermit"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -949,7 +997,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_iss_no_config(self) -> None:
"""Test providing an issuer claim without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "iss": "invalid"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}})
@@ -957,12 +1005,12 @@ class JWTTestCase(unittest.HomeserverTestCase):
"""Test validating the audience claim."""
# A valid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
# An invalid audience.
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -971,7 +1019,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
# Not providing an audience.
channel = self.jwt_login({"sub": "kermit"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -981,7 +1029,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_aud_no_config(self) -> None:
"""Test providing an audience without requiring it in the configuration."""
channel = self.jwt_login({"sub": "kermit", "aud": "invalid"})
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -991,20 +1039,20 @@ class JWTTestCase(unittest.HomeserverTestCase):
def test_login_default_sub(self) -> None:
"""Test reading user ID from the default subject claim."""
channel = self.jwt_login({"sub": "kermit"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
@override_config({"jwt_config": {**base_config, "subject_claim": "username"}})
def test_login_custom_sub(self) -> None:
"""Test reading user ID from a custom subject claim."""
channel = self.jwt_login({"username": "frog"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@frog:test")
def test_login_no_token(self) -> None:
params = {"type": "org.matrix.login.jwt"}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")
@@ -1086,12 +1134,12 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase):
def test_login_jwt_valid(self) -> None:
channel = self.jwt_login({"sub": "kermit"})
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["user_id"], "@kermit:test")
def test_login_jwt_invalid_signature(self) -> None:
channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(
channel.json_body["error"],
@@ -1152,7 +1200,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
b"POST", LOGIN_URL, params, access_token=self.service.token
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
def test_login_appservice_user_bot(self) -> None:
"""Test that the appservice bot can use /login"""
@@ -1166,7 +1214,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
b"POST", LOGIN_URL, params, access_token=self.service.token
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
def test_login_appservice_wrong_user(self) -> None:
"""Test that non-as users cannot login with the as token"""
@@ -1180,7 +1228,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
b"POST", LOGIN_URL, params, access_token=self.service.token
)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
def test_login_appservice_wrong_as(self) -> None:
"""Test that as users cannot login with wrong as token"""
@@ -1194,7 +1242,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
b"POST", LOGIN_URL, params, access_token=self.another_service.token
)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
def test_login_appservice_no_token(self) -> None:
"""Test that users must provide a token when using the appservice
@@ -1208,7 +1256,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase):
}
channel = self.make_request(b"POST", LOGIN_URL, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
@skip_unless(HAS_OIDC, "requires OIDC")
@@ -1240,13 +1288,17 @@ class UsernamePickerTestCase(HomeserverTestCase):
def test_username_picker(self) -> None:
"""Test the happy path of a username picker flow."""
+ fake_oidc_server = self.helper.fake_oidc_server()
+
# do the start of the login flow
- channel = self.helper.auth_via_oidc(
- {"sub": "tester", "displayname": "Jonny"}, TEST_CLIENT_REDIRECT_URL
+ channel, _ = self.helper.auth_via_oidc(
+ fake_oidc_server,
+ {"sub": "tester", "displayname": "Jonny"},
+ TEST_CLIENT_REDIRECT_URL,
)
# that should redirect to the username picker
- self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result)
+ self.assertEqual(channel.code, 302, channel.result)
location_headers = channel.headers.getRawHeaders("Location")
assert location_headers
picker_url = location_headers[0]
@@ -1290,7 +1342,7 @@ class UsernamePickerTestCase(HomeserverTestCase):
("Content-Length", str(len(content))),
],
)
- self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result)
+ self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
@@ -1300,7 +1352,7 @@ class UsernamePickerTestCase(HomeserverTestCase):
path=location_headers[0],
custom_headers=[("Cookie", "username_mapping_session=" + session_id)],
)
- self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result)
+ self.assertEqual(chan.code, 302, chan.result)
location_headers = chan.headers.getRawHeaders("Location")
assert location_headers
@@ -1325,5 +1377,5 @@ class UsernamePickerTestCase(HomeserverTestCase):
"/login",
content={"type": "m.login.token", "token": login_token},
)
- self.assertEqual(chan.code, HTTPStatus.OK, chan.result)
+ self.assertEqual(chan.code, 200, chan.result)
self.assertEqual(chan.json_body["user_id"], "@bobby:test")
diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py
new file mode 100644
index 0000000000..c2e1e08811
--- /dev/null
+++ b/tests/rest/client/test_login_token_request.py
@@ -0,0 +1,134 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, login_token_request
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import override_config
+
+endpoint = "/_matrix/client/unstable/org.matrix.msc3882/login/token"
+
+
+class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ login.register_servlets,
+ admin.register_servlets,
+ login_token_request.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.hs = self.setup_test_homeserver()
+ self.hs.config.registration.enable_registration = True
+ self.hs.config.registration.registrations_require_3pid = []
+ self.hs.config.registration.auto_join_rooms = []
+ self.hs.config.captcha.enable_registration_captcha = False
+
+ return self.hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.user = "user123"
+ self.password = "password"
+
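+ # Without the MSC3882 experimental flag, the endpoint rejects requests with 400,
+ # whether or not the caller is authenticated.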
+ def test_disabled(self) -> None:
+ channel = self.make_request("POST", endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 400)
+
+ self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", endpoint, {}, access_token=token)
+ self.assertEqual(channel.code, 400)
+
+ @override_config({"experimental_features": {"msc3882_enabled": True}})
+ def test_require_auth(self) -> None:
+ channel = self.make_request("POST", endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 401)
+
+ @override_config({"experimental_features": {"msc3882_enabled": True}})
+ def test_uia_on(self) -> None:
+ user_id = self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", endpoint, {}, access_token=token)
+ self.assertEqual(channel.code, 401)
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
+
+ session = channel.json_body["session"]
+
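+ # Complete user-interactive auth for the returned session by replaying the
+ # user's password.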
+ uia = {
+ "auth": {
+ "type": "m.login.password",
+ "identifier": {"type": "m.id.user", "user": self.user},
+ "password": self.password,
+ "session": session,
+ },
+ }
+
+ channel = self.make_request("POST", endpoint, uia, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in"], 300)
+
+ login_token = channel.json_body["login_token"]
+
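+ # The returned short-lived login token can be exchanged for an access token
+ # via m.login.token on /login.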
+ channel = self.make_request(
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["user_id"], user_id)
+
+ @override_config(
+ {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}}
+ )
+ def test_uia_off(self) -> None:
+ user_id = self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", endpoint, {}, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in"], 300)
+
+ login_token = channel.json_body["login_token"]
+
+ channel = self.make_request(
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["user_id"], user_id)
+
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3882_enabled": True,
+ "msc3882_ui_auth": False,
+ "msc3882_token_timeout": "15s",
+ }
+ }
+ )
+ def test_expires_in(self) -> None:
+ self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", endpoint, {}, access_token=token)
+ self.assertEqual(channel.code, 200)
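+ # The configured msc3882_token_timeout of 15s is reflected in expires_in.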
+ self.assertEqual(channel.json_body["expires_in"], 15)
diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py
new file mode 100644
index 0000000000..0b8fcb0c47
--- /dev/null
+++ b/tests/rest/client/test_models.py
@@ -0,0 +1,76 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest as stdlib_unittest
+
+from pydantic import BaseModel, ValidationError
+from typing_extensions import Literal
+
+from synapse.rest.client.models import EmailRequestTokenBody
+
+
+class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase):
+ class Model(BaseModel):
+ medium: Literal["email", "msisdn"]
+
+ def test_accepts_valid_medium_string(self) -> None:
+ """Sanity check that Pydantic behaves sensibly with an enum-of-str
+
+ This is arguably more of a test of a class that inherits from str and Enum
+ simultaneously.
+ """
+ model = self.Model.parse_obj({"medium": "email"})
+ self.assertEqual(model.medium, "email")
+
+ def test_rejects_invalid_medium_value(self) -> None:
+ with self.assertRaises(ValidationError):
+ self.Model.parse_obj({"medium": "interpretive_dance"})
+
+ def test_rejects_invalid_medium_type(self) -> None:
+ with self.assertRaises(ValidationError):
+ self.Model.parse_obj({"medium": 123})
+
+
+class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase):
+ base_request = {
+ "client_secret": "hunter2",
+ "email": "alice@wonderland.com",
+ "send_attempt": 1,
+ }
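+ # A request body that is valid on its own; the tests below layer
+ # identity-server fields on top of it.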
+
+ def test_token_required_if_id_server_provided(self) -> None:
+ with self.assertRaises(ValidationError):
+ EmailRequestTokenBody.parse_obj(
+ {
+ **self.base_request,
+ "id_server": "identity.wonderland.com",
+ }
+ )
+ with self.assertRaises(ValidationError):
+ EmailRequestTokenBody.parse_obj(
+ {
+ **self.base_request,
+ "id_server": "identity.wonderland.com",
+ "id_access_token": None,
+ }
+ )
+
+ def test_token_typechecked_when_id_server_provided(self) -> None:
+ with self.assertRaises(ValidationError):
+ EmailRequestTokenBody.parse_obj(
+ {
+ **self.base_request,
+ "id_server": "identity.wonderland.com",
+ "id_access_token": 1337,
+ }
+ )
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index 7401b5e0c0..5dfe44defb 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -11,17 +11,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List
+from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.rest import admin
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
class RedactionsTestCase(HomeserverTestCase):
@@ -67,7 +68,12 @@ class RedactionsTestCase(HomeserverTestCase):
)
def _redact_event(
- self, access_token: str, room_id: str, event_id: str, expect_code: int = 200
+ self,
+ access_token: str,
+ room_id: str,
+ event_id: str,
+ expect_code: int = 200,
+ with_relations: Optional[List[str]] = None,
) -> JsonDict:
"""Helper function to send a redaction event.
@@ -75,13 +81,19 @@ class RedactionsTestCase(HomeserverTestCase):
"""
path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id)
- channel = self.make_request("POST", path, content={}, access_token=access_token)
- self.assertEqual(int(channel.result["code"]), expect_code)
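+ # MSC3912: optionally ask the server to also redact events that relate to
+ # the target event.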
+ request_content = {}
+ if with_relations:
+ request_content["org.matrix.msc3912.with_relations"] = with_relations
+
+ channel = self.make_request(
+ "POST", path, request_content, access_token=access_token
+ )
+ self.assertEqual(channel.code, expect_code)
return channel.json_body
def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]:
channel = self.make_request("GET", "sync", access_token=self.mod_access_token)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
room_sync = channel.json_body["rooms"]["join"][room_id]
return room_sync["timeline"]["events"]
@@ -201,3 +213,256 @@ class RedactionsTestCase(HomeserverTestCase):
# These should all succeed, even though this would be denied by
# the standard message ratelimiter
self._redact_event(self.mod_access_token, self.room_id, msg_id)
+
+ @override_config({"experimental_features": {"msc3912_enabled": True}})
+ def test_redact_relations(self) -> None:
+ """Tests that we can redact the relations of an event at the same time as the
+ event itself.
+ """
+ # Send a root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={"msgtype": "m.text", "body": "hello"},
+ tok=self.mod_access_token,
+ )
+ root_event_id = res["event_id"]
+
+ # Send an edit to this root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "body": " * hello world",
+ "m.new_content": {
+ "body": "hello world",
+ "msgtype": "m.text",
+ },
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.REPLACE,
+ },
+ "msgtype": "m.text",
+ },
+ tok=self.mod_access_token,
+ )
+ edit_event_id = res["event_id"]
+
+ # Also send a threaded message whose root is the same as the edit's.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message 1",
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.THREAD,
+ },
+ },
+ tok=self.mod_access_token,
+ )
+ threaded_event_id = res["event_id"]
+
+ # Also send a reaction, again with the same root.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Reaction,
+ content={
+ "m.relates_to": {
+ "rel_type": RelationTypes.ANNOTATION,
+ "event_id": root_event_id,
+ "key": "👍",
+ }
+ },
+ tok=self.mod_access_token,
+ )
+ reaction_event_id = res["event_id"]
+
+ # Redact the root event, specifying that we also want to delete events that
+ # relate to it with m.replace or m.thread.
+ self._redact_event(
+ self.mod_access_token,
+ self.room_id,
+ root_event_id,
+ with_relations=[
+ RelationTypes.REPLACE,
+ RelationTypes.THREAD,
+ ],
+ )
+
+ # Check that the root event got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, root_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the edit got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, edit_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the threaded message got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, threaded_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the reaction did not get redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, reaction_event_id, self.mod_access_token
+ )
+ self.assertNotIn("redacted_because", event_dict, event_dict)
+
+ @override_config({"experimental_features": {"msc3912_enabled": True}})
+ def test_redact_relations_no_perms(self) -> None:
+ """Tests that, when redacting a message along with its relations, if not all
+ the related messages can be redacted because of insufficient permissions, the
+ server still redacts all the ones that can be.
+ """
+ # Send a root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "root",
+ },
+ tok=self.other_access_token,
+ )
+ root_event_id = res["event_id"]
+
+ # Send a first threaded message, this one from the moderator. The moderator's
+ # message is deliberately the first one with the m.thread relation (rather than
+ # the last) so that, when the server fails to redact it, it doesn't stop there
+ # and instead goes on to redact the other one.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message 1",
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.THREAD,
+ },
+ },
+ tok=self.mod_access_token,
+ )
+ first_threaded_event_id = res["event_id"]
+
+ # Send a second threaded message, this time from the user who'll perform the
+ # redaction.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message 2",
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.THREAD,
+ },
+ },
+ tok=self.other_access_token,
+ )
+ second_threaded_event_id = res["event_id"]
+
+ # Redact the thread's root, and request that all threaded messages are also
+ # redacted. Send that request from the non-mod user, so that the first threaded
+ # event cannot be redacted.
+ self._redact_event(
+ self.other_access_token,
+ self.room_id,
+ root_event_id,
+ with_relations=[RelationTypes.THREAD],
+ )
+
+ # Check that the thread root got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, root_event_id, self.other_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the last message in the thread got redacted, despite failing to
+ # redact the one before it.
+ event_dict = self.helper.get_event(
+ self.room_id, second_threaded_event_id, self.other_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the message that was sent into the thread by the mod user is not
+ # redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, first_threaded_event_id, self.other_access_token
+ )
+ self.assertIn("body", event_dict["content"], event_dict)
+ self.assertEqual("message 1", event_dict["content"]["body"])
+
+ @override_config({"experimental_features": {"msc3912_enabled": True}})
+ def test_redact_relations_txn_id_reuse(self) -> None:
+ """Tests that redacting a message using a transaction ID, then reusing the same
+ transaction ID but providing an additional list of relations to redact, is
+ effectively a no-op.
+ """
+ # Send a root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "root",
+ },
+ tok=self.mod_access_token,
+ )
+ root_event_id = res["event_id"]
+
+ # Send a first threaded message.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "I'm in a thread!",
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.THREAD,
+ },
+ },
+ tok=self.mod_access_token,
+ )
+ threaded_event_id = res["event_id"]
+
+ # Send a first redaction request which redacts only the root event.
+ channel = self.make_request(
+ method="PUT",
+ path=f"/rooms/{self.room_id}/redact/{root_event_id}/foo",
+ content={},
+ access_token=self.mod_access_token,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # Send a second redaction request which redacts the root event as well as
+ # threaded messages.
+ channel = self.make_request(
+ method="PUT",
+ path=f"/rooms/{self.room_id}/redact/{root_event_id}/foo",
+ content={"org.matrix.msc3912.with_relations": [RelationTypes.THREAD]},
+ access_token=self.mod_access_token,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # Check that the root event got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, root_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict)
+
+ # Check that the threaded message didn't get redacted (since that wasn't part of
+ # the original redaction).
+ event_dict = self.helper.get_event(
+ self.room_id, threaded_event_id, self.mod_access_token
+ )
+ self.assertIn("body", event_dict["content"], event_dict)
+ self.assertEqual("I'm in a thread!", event_dict["content"]["body"])
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index f8e64ce6ac..11cf3939d8 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -22,7 +22,11 @@ import pkg_resources
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
-from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType
+from synapse.api.constants import (
+ APP_SERVICE_REGISTRATION_TYPE,
+ ApprovalNoticeMedium,
+ LoginType,
+)
from synapse.api.errors import Codes
from synapse.appservice import ApplicationService
from synapse.rest.client import account, account_validity, login, logout, register, sync
@@ -70,7 +74,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
det_data = {"user_id": user_id, "home_server": self.hs.hostname}
self.assertDictContainsSubset(det_data, channel.json_body)
@@ -91,7 +95,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
)
- self.assertEqual(channel.result["code"], b"400", channel.result)
+ self.assertEqual(channel.code, 400, msg=channel.result)
def test_POST_appservice_registration_invalid(self) -> None:
self.appservice = None # no application service exists
@@ -100,20 +104,20 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
b"POST", self.url + b"?access_token=i_am_an_app_service", request_data
)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
def test_POST_bad_password(self) -> None:
request_data = {"username": "kermit", "password": 666}
channel = self.make_request(b"POST", self.url, request_data)
- self.assertEqual(channel.result["code"], b"400", channel.result)
+ self.assertEqual(channel.code, 400, msg=channel.result)
self.assertEqual(channel.json_body["error"], "Invalid password")
def test_POST_bad_username(self) -> None:
request_data = {"username": 777, "password": "monkey"}
channel = self.make_request(b"POST", self.url, request_data)
- self.assertEqual(channel.result["code"], b"400", channel.result)
+ self.assertEqual(channel.code, 400, msg=channel.result)
self.assertEqual(channel.json_body["error"], "Invalid username")
def test_POST_user_valid(self) -> None:
@@ -132,7 +136,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"home_server": self.hs.hostname,
"device_id": device_id,
}
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertDictContainsSubset(det_data, channel.json_body)
@override_config({"enable_registration": False})
@@ -142,7 +146,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", self.url, request_data)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["error"], "Registration has been disabled")
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
@@ -153,7 +157,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"}
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertDictContainsSubset(det_data, channel.json_body)
def test_POST_disabled_guest_registration(self) -> None:
@@ -161,7 +165,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(channel.json_body["error"], "Guest access is disabled")
@override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
@@ -171,16 +175,16 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", url, b"{}")
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
@override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
def test_POST_ratelimiting(self) -> None:
@@ -194,16 +198,16 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", self.url, request_data)
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
@override_config({"registration_requires_token": True})
def test_POST_registration_requires_token(self) -> None:
@@ -231,7 +235,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
# Request without auth to get flows and session
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
flows = channel.json_body["flows"]
# Synapse adds a dummy stage to differentiate flows where otherwise one
# flow would be a subset of another flow.
@@ -248,7 +252,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"session": session,
}
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
completed = channel.json_body["completed"]
self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed)
@@ -263,7 +267,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"home_server": self.hs.hostname,
"device_id": device_id,
}
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertDictContainsSubset(det_data, channel.json_body)
# Check the `completed` counter has been incremented and pending is 0
@@ -293,21 +297,21 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"session": session,
}
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM)
self.assertEqual(channel.json_body["completed"], [])
# Test with non-string (invalid)
params["auth"]["token"] = 1234
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
self.assertEqual(channel.json_body["completed"], [])
# Test with unknown token (invalid)
params["auth"]["token"] = "1234"
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED)
self.assertEqual(channel.json_body["completed"], [])
@@ -361,7 +365,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"session": session2,
}
channel = self.make_request(b"POST", self.url, params2)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED)
self.assertEqual(channel.json_body["completed"], [])
@@ -381,7 +385,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
# Check auth still fails when using token with session2
channel = self.make_request(b"POST", self.url, params2)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED)
self.assertEqual(channel.json_body["completed"], [])
@@ -415,7 +419,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"session": session,
}
channel = self.make_request(b"POST", self.url, params)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED)
self.assertEqual(channel.json_body["completed"], [])
@@ -570,7 +574,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
def test_advertised_flows(self) -> None:
channel = self.make_request(b"POST", self.url, b"{}")
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
flows = channel.json_body["flows"]
# with the stock config, we only expect the dummy flow
@@ -586,14 +590,14 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
"require_at_registration": True,
},
"account_threepid_delegates": {
- "email": "https://id_server",
"msisdn": "https://id_server",
},
+ "email": {"notif_from": "Synapse <synapse@example.com>"},
}
)
def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None:
channel = self.make_request(b"POST", self.url, b"{}")
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
flows = channel.json_body["flows"]
self.assertCountEqual(
@@ -625,7 +629,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
)
def test_advertised_flows_no_msisdn_email_required(self) -> None:
channel = self.make_request(b"POST", self.url, b"{}")
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, msg=channel.result)
flows = channel.json_body["flows"]
# with the stock config, we expect all four combinations of 3pid
@@ -765,6 +769,32 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 400, channel.json_body)
self.assertEqual(channel.json_body["errcode"], Codes.USER_IN_USE)
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ }
+ )
+ def test_require_approval(self) -> None:
+ channel = self.make_request(
+ "POST",
+ "register",
+ {
+ "username": "kermit",
+ "password": "monkey",
+ "auth": {"type": LoginType.DUMMY},
+ },
+ )
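+ # The registration is rejected because the new account is awaiting approval.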
+ self.assertEqual(403, channel.code, channel.result)
+ self.assertEqual(Codes.USER_AWAITING_APPROVAL, channel.json_body["errcode"])
+ self.assertEqual(
+ ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
+ )
+
class AccountValidityTestCase(unittest.HomeserverTestCase):
@@ -797,13 +827,13 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
# endpoint.
channel = self.make_request(b"GET", "/sync", access_token=tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.reactor.advance(datetime.timedelta(weeks=1).total_seconds())
channel = self.make_request(b"GET", "/sync", access_token=tok)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(
channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result
)
@@ -823,12 +853,12 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
url = "/_synapse/admin/v1/account_validity/validity"
request_data = {"user_id": user_id}
channel = self.make_request(b"POST", url, request_data, access_token=admin_tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# The specific endpoint doesn't matter, all we need is an authenticated
# endpoint.
channel = self.make_request(b"GET", "/sync", access_token=tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
def test_manual_expire(self) -> None:
user_id = self.register_user("kermit", "monkey")
@@ -844,12 +874,12 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
"enable_renewal_emails": False,
}
channel = self.make_request(b"POST", url, request_data, access_token=admin_tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# The specific endpoint doesn't matter, all we need is an authenticated
# endpoint.
channel = self.make_request(b"GET", "/sync", access_token=tok)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, msg=channel.result)
self.assertEqual(
channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result
)
@@ -868,18 +898,18 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
"enable_renewal_emails": False,
}
channel = self.make_request(b"POST", url, request_data, access_token=admin_tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Try to log the user out
channel = self.make_request(b"POST", "/logout", access_token=tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Log the user in again (allowed for expired accounts)
tok = self.login("kermit", "monkey")
# Try to log out all of the user's sessions
channel = self.make_request(b"POST", "/logout/all", access_token=tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
@@ -954,7 +984,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id))
url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token
channel = self.make_request(b"GET", url)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Check that we're getting HTML back.
content_type = channel.headers.getRawHeaders(b"Content-Type")
@@ -972,7 +1002,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
# Move 1 day forward. Try to renew with the same token again.
url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token
channel = self.make_request(b"GET", url)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
# Check that we're getting HTML back.
content_type = channel.headers.getRawHeaders(b"Content-Type")
@@ -992,14 +1022,14 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
# succeed.
self.reactor.advance(datetime.timedelta(days=3).total_seconds())
channel = self.make_request(b"GET", "/sync", access_token=tok)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
def test_renewal_invalid_token(self) -> None:
# Hit the renewal endpoint with an invalid token and check that it behaves as
# expected, i.e. that it responds with 404 Not Found and the correct HTML.
url = "/_matrix/client/unstable/account_validity/renew?token=123"
channel = self.make_request(b"GET", url)
- self.assertEqual(channel.result["code"], b"404", channel.result)
+ self.assertEqual(channel.code, 404, msg=channel.result)
# Check that we're getting HTML back.
content_type = channel.headers.getRawHeaders(b"Content-Type")
@@ -1023,7 +1053,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
"/_matrix/client/unstable/account_validity/send_mail",
access_token=tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(len(self.email_attempts), 1)
@@ -1096,7 +1126,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
"/_matrix/client/unstable/account_validity/send_mail",
access_token=tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(len(self.email_attempts), 1)
@@ -1176,7 +1206,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase):
b"GET",
f"{self.url}?token={token}",
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["valid"], True)
def test_GET_token_invalid(self) -> None:
@@ -1185,7 +1215,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase):
b"GET",
f"{self.url}?token={token}",
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.assertEqual(channel.json_body["valid"], False)
@override_config(
@@ -1201,10 +1231,10 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase):
)
if i == 5:
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, msg=channel.result)
retry_after_ms = int(channel.json_body["retry_after_ms"])
else:
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
self.reactor.advance(retry_after_ms / 1000.0 + 1.0)
@@ -1212,4 +1242,4 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase):
b"GET",
f"{self.url}?token={token}",
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, msg=channel.result)
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index d589f07314..b86f341ff5 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -654,6 +654,14 @@ class RelationsTestCase(BaseRelationsTestCase):
)
# We also expect to get the original event (the id of which is self.parent_id)
+ # when requesting the unstable endpoint.
+ self.assertNotIn("original_event", channel.json_body)
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
self.assertEqual(
channel.json_body["original_event"]["event_id"], self.parent_id
)
@@ -728,7 +736,6 @@ class RelationsTestCase(BaseRelationsTestCase):
class RelationPaginationTestCase(BaseRelationsTestCase):
- @unittest.override_config({"experimental_features": {"msc3715_enabled": True}})
def test_basic_paginate_relations(self) -> None:
"""Tests that calling pagination API correctly the latest relations."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
@@ -756,11 +763,6 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
channel.json_body["chunk"][0],
)
- # We also expect to get the original event (the id of which is self.parent_id)
- self.assertEqual(
- channel.json_body["original_event"]["event_id"], self.parent_id
- )
-
# Make sure next_batch has something in it that looks like it could be a
# valid token.
self.assertIsInstance(
@@ -771,7 +773,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
channel = self.make_request(
"GET",
f"/_matrix/client/v1/rooms/{self.room}/relations"
- f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f",
+ f"/{self.parent_id}?limit=1&dir=f",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
@@ -809,7 +811,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
channel = self.make_request(
"GET",
- f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=3{from_token}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
@@ -827,6 +829,32 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)
+ # Test forward pagination.
+ prev_token = ""
+ found_event_ids = []
+ for _ in range(20):
+ from_token = ""
+ if prev_token:
+ from_token = "&from=" + prev_token
+
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?dir=f&limit=3{from_token}",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+
+ found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
+ next_batch = channel.json_body.get("next_batch")
+
+ self.assertNotEqual(prev_token, next_batch)
+ prev_token = next_batch
+
+ if not prev_token:
+ break
+
+ self.assertEqual(found_event_ids, expected_event_ids)
+
def test_pagination_from_sync_and_messages(self) -> None:
"""Pagination tokens from /sync and /messages can be used to paginate /relations."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
@@ -999,7 +1027,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
bundled_aggregations,
)
- self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 6)
+ self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
def test_annotation_to_annotation(self) -> None:
"""Any relation to an annotation should be ignored."""
@@ -1035,7 +1063,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
bundled_aggregations,
)
- self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6)
+ self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
def test_thread(self) -> None:
"""
@@ -1080,21 +1108,21 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
# The "user" sent the root event and is making queries for the bundled
# aggregations: they have participated.
- self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 8)
+ self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7)
# The "user2" sent replies in the thread and is making queries for the
# bundled aggregations: they have participated.
#
# Note that this re-uses some cached values, so the total number of
# queries is much smaller.
self._test_bundled_aggregations(
- RelationTypes.THREAD, _gen_assert(True), 2, access_token=self.user2_token
+ RelationTypes.THREAD, _gen_assert(True), 3, access_token=self.user2_token
)
# A user with no interactions with the thread: they have not participated.
user3_id, user3_token = self._create_user("charlie")
self.helper.join(self.room, user=user3_id, tok=user3_token)
self._test_bundled_aggregations(
- RelationTypes.THREAD, _gen_assert(False), 2, access_token=user3_token
+ RelationTypes.THREAD, _gen_assert(False), 3, access_token=user3_token
)
def test_thread_with_bundled_aggregations_for_latest(self) -> None:
@@ -1142,7 +1170,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
bundled_aggregations["latest_event"].get("unsigned"),
)
- self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 8)
+ self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7)
def test_nested_thread(self) -> None:
"""
@@ -1495,6 +1523,26 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
)
self.assertEqual(200, channel.code, channel.json_body)
+ def _get_threads(self) -> List[Tuple[str, str]]:
+ """Request the threads in the room and returns a list of thread ID and latest event ID."""
+ # Request the threads in the room.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ threads = channel.json_body["chunk"]
+ return [
+ (
+ t["event_id"],
+ t["unsigned"]["m.relations"][RelationTypes.THREAD]["latest_event"][
+ "event_id"
+ ],
+ )
+ for t in threads
+ ]
+
def test_redact_relation_annotation(self) -> None:
"""
Test that annotations of an event are properly handled after the
@@ -1539,58 +1587,82 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
The redacted event should not be included in bundled aggregations or
the response to relations.
"""
- channel = self._send_relation(
- RelationTypes.THREAD,
- EventTypes.Message,
- content={"body": "reply 1", "msgtype": "m.text"},
- )
- unredacted_event_id = channel.json_body["event_id"]
+ # Create a thread with a few events in it.
+ thread_replies = []
+ for i in range(3):
+ channel = self._send_relation(
+ RelationTypes.THREAD,
+ EventTypes.Message,
+ content={"body": f"reply {i}", "msgtype": "m.text"},
+ )
+ thread_replies.append(channel.json_body["event_id"])
- # Note that the *last* event in the thread is redacted, as that gets
- # included in the bundled aggregation.
- channel = self._send_relation(
- RelationTypes.THREAD,
- EventTypes.Message,
- content={"body": "reply 2", "msgtype": "m.text"},
+ ##################################################
+ # Check the test data is configured as expected. #
+ ##################################################
+ self.assertEquals(self._get_related_events(), list(reversed(thread_replies)))
+ relations = self._get_bundled_aggregations()
+ self.assertDictContainsSubset(
+ {"count": 3, "current_user_participated": True},
+ relations[RelationTypes.THREAD],
+ )
+ # The latest event is the last sent event.
+ self.assertEqual(
+ relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+ thread_replies[-1],
)
- to_redact_event_id = channel.json_body["event_id"]
- # Both relations exist.
- event_ids = self._get_related_events()
+ # There should be one thread, the latest event is the event that will be redacted.
+ self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
+
+ ##########################
+ # Redact the last event. #
+ ##########################
+ self._redact(thread_replies.pop())
+
+ # The thread should still exist, but the latest event should be updated.
+ self.assertEquals(self._get_related_events(), list(reversed(thread_replies)))
relations = self._get_bundled_aggregations()
- self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id])
self.assertDictContainsSubset(
- {
- "count": 2,
- "current_user_participated": True,
- },
+ {"count": 2, "current_user_participated": True},
relations[RelationTypes.THREAD],
)
- # And the latest event returned is the event that will be redacted.
+ # And the latest event is the last unredacted event.
self.assertEqual(
relations[RelationTypes.THREAD]["latest_event"]["event_id"],
- to_redact_event_id,
+ thread_replies[-1],
)
+ self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
- # Redact one of the reactions.
- self._redact(to_redact_event_id)
+ ###########################################
+ # Redact the *first* event in the thread. #
+ ###########################################
+ self._redact(thread_replies.pop(0))
- # The unredacted relation should still exist.
- event_ids = self._get_related_events()
+ # Nothing should have changed (except the thread count).
+ self.assertEquals(self._get_related_events(), thread_replies)
relations = self._get_bundled_aggregations()
- self.assertEquals(event_ids, [unredacted_event_id])
self.assertDictContainsSubset(
- {
- "count": 1,
- "current_user_participated": True,
- },
+ {"count": 1, "current_user_participated": True},
relations[RelationTypes.THREAD],
)
- # And the latest event is now the unredacted event.
+ # And the latest event is the last unredacted event.
self.assertEqual(
relations[RelationTypes.THREAD]["latest_event"]["event_id"],
- unredacted_event_id,
+ thread_replies[-1],
)
+ self.assertEqual(self._get_threads(), [(self.parent_id, thread_replies[-1])])
+
+ ####################################
+ # Redact the last remaining event. #
+ ####################################
+ self._redact(thread_replies.pop(0))
+ self.assertEquals(thread_replies, [])
+
+ # The event should no longer be considered a thread.
+ self.assertEquals(self._get_related_events(), [])
+ self.assertEquals(self._get_bundled_aggregations(), {})
+ self.assertEqual(self._get_threads(), [])
def test_redact_parent_edit(self) -> None:
"""Test that edits of an event are redacted when the original event
@@ -1649,7 +1721,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
{"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]},
)
- @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
def test_redact_parent_thread(self) -> None:
"""
Test that thread replies are still available when the root event is redacted.
@@ -1679,3 +1750,165 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
relations[RelationTypes.THREAD]["latest_event"]["event_id"],
related_event_id,
)
+
+
+class ThreadsTestCase(BaseRelationsTestCase):
+ def _get_threads(self, body: JsonDict) -> List[Tuple[str, str]]:
+ return [
+ (
+ ev["event_id"],
+ ev["unsigned"]["m.relations"]["m.thread"]["latest_event"]["event_id"],
+ )
+ for ev in body["chunk"]
+ ]
+
+ def test_threads(self) -> None:
+ """Create threads and ensure the ordering is due to their latest event."""
+ # Create 2 threads.
+ thread_1 = self.parent_id
+ res = self.helper.send(self.room, body="Thread Root!", tok=self.user_token)
+ thread_2 = res["event_id"]
+
+ channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+ reply_1 = channel.json_body["event_id"]
+ channel = self._send_relation(
+ RelationTypes.THREAD, "m.room.test", parent_id=thread_2
+ )
+ reply_2 = channel.json_body["event_id"]
+
+ # Request the threads in the room.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ threads = self._get_threads(channel.json_body)
+ self.assertEqual(threads, [(thread_2, reply_2), (thread_1, reply_1)])
+
+ # Update the first thread, the ordering should swap.
+ channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+ reply_3 = channel.json_body["event_id"]
+
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ # Tuple of (thread ID, latest event ID) for each thread.
+ threads = self._get_threads(channel.json_body)
+ self.assertEqual(threads, [(thread_1, reply_3), (thread_2, reply_2)])
+
+ def test_pagination(self) -> None:
+ """Create threads and paginate through them."""
+ # Create 2 threads.
+ thread_1 = self.parent_id
+ res = self.helper.send(self.room, body="Thread Root!", tok=self.user_token)
+ thread_2 = res["event_id"]
+
+ self._send_relation(RelationTypes.THREAD, "m.room.test")
+ self._send_relation(RelationTypes.THREAD, "m.room.test", parent_id=thread_2)
+
+ # Request the threads in the room.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads?limit=1",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(thread_roots, [thread_2])
+
+ # Make sure next_batch has something in it that looks like it could be a
+ # valid token.
+ next_batch = channel.json_body.get("next_batch")
+ self.assertIsInstance(next_batch, str, channel.json_body)
+
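+ # Fetch the second page using that token; it should return the remaining
+ # thread and no further next_batch.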
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads?limit=1&from={next_batch}",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(thread_roots, [thread_1], channel.json_body)
+
+ self.assertNotIn("next_batch", channel.json_body, channel.json_body)
+
+ def test_include(self) -> None:
+ """Filtering threads to all or participated in should work."""
+ # Thread 1 has the user as the root event.
+ thread_1 = self.parent_id
+ self._send_relation(
+ RelationTypes.THREAD, "m.room.test", access_token=self.user2_token
+ )
+
+ # Thread 2 has the user replying.
+ res = self.helper.send(self.room, body="Thread Root!", tok=self.user2_token)
+ thread_2 = res["event_id"]
+ self._send_relation(RelationTypes.THREAD, "m.room.test", parent_id=thread_2)
+
+ # Thread 3 is one the user has not participated in.
+ res = self.helper.send(self.room, body="Another thread!", tok=self.user2_token)
+ thread_3 = res["event_id"]
+ self._send_relation(
+ RelationTypes.THREAD,
+ "m.room.test",
+ access_token=self.user2_token,
+ parent_id=thread_3,
+ )
+
+ # All threads in the room.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(
+ thread_roots, [thread_3, thread_2, thread_1], channel.json_body
+ )
+
+ # Only participated threads.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads?include=participated",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(thread_roots, [thread_2, thread_1], channel.json_body)
+
+ def test_ignored_user(self) -> None:
+ """Events from ignored users should be ignored."""
+ # Thread 1 has a reply from an ignored user.
+ thread_1 = self.parent_id
+ self._send_relation(
+ RelationTypes.THREAD, "m.room.test", access_token=self.user2_token
+ )
+
+ # Thread 2 is created by an ignored user.
+ res = self.helper.send(self.room, body="Thread Root!", tok=self.user2_token)
+ thread_2 = res["event_id"]
+ self._send_relation(RelationTypes.THREAD, "m.room.test", parent_id=thread_2)
+
+ # Ignore user2.
+ self.get_success(
+ self.store.add_account_data_for_user(
+ self.user_id,
+ AccountDataTypes.IGNORED_USER_LIST,
+ {"ignored_users": {self.user2_id: {}}},
+ )
+ )
+
+ # Only thread 1 is returned.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/threads",
+ access_token=self.user_token,
+ )
+ self.assertEquals(200, channel.code, channel.json_body)
+ thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(thread_roots, [thread_1], channel.json_body)
diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
new file mode 100644
index 0000000000..ad00a476e1
--- /dev/null
+++ b/tests/rest/client/test_rendezvous.py
@@ -0,0 +1,45 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest.client import rendezvous
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import override_config
+
+endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous"
+
+
+class RendezvousServletTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ rendezvous.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.hs = self.setup_test_homeserver()
+ return self.hs
+
+ def test_disabled(self) -> None:
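+ # With MSC3886 support disabled (the default), requests to the endpoint are rejected.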
+ channel = self.make_request("POST", endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 400)
+
+ @override_config({"experimental_features": {"msc3886_endpoint": "/asd"}})
+ def test_redirect(self) -> None:
+ channel = self.make_request("POST", endpoint, {}, access_token=None)
+ self.assertEqual(channel.code, 307)
+ self.assertEqual(channel.headers.getRawHeaders("Location"), ["/asd"])
diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py
index ad0d0209f7..7cb1017a4a 100644
--- a/tests/rest/client/test_report_event.py
+++ b/tests/rest/client/test_report_event.py
@@ -77,6 +77,4 @@ class ReportEventTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST", self.report_path, data, access_token=self.other_user_tok
)
- self.assertEqual(
- response_status, int(channel.result["code"]), msg=channel.result["body"]
- )
+ self.assertEqual(response_status, channel.code, msg=channel.result["body"])
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index ac9c113354..9c8c1889d3 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -20,7 +20,7 @@ from synapse.api.constants import EventTypes
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from synapse.visibility import filter_events_for_client
@@ -188,7 +188,7 @@ class RetentionTestCase(unittest.HomeserverTestCase):
message_handler = self.hs.get_message_handler()
create_event = self.get_success(
message_handler.get_room_data(
- self.user_id, room_id, EventTypes.Create, state_key=""
+ create_requester(self.user_id), room_id, EventTypes.Create, state_key=""
)
)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index aa2f578441..e919e089cb 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -20,7 +20,7 @@
import json
from http import HTTPStatus
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
-from unittest.mock import Mock, call
+from unittest.mock import Mock, call, patch
from urllib import parse as urlparse
from parameterized import param, parameterized
@@ -35,13 +35,15 @@ from synapse.api.constants import (
EventTypes,
Membership,
PublicRoomsFilterFields,
- RelationTypes,
RoomTypes,
)
from synapse.api.errors import Codes, HttpResponseException
+from synapse.appservice import ApplicationService
+from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
from synapse.handlers.pagination import PurgeStatus
from synapse.rest import admin
-from synapse.rest.client import account, directory, login, profile, room, sync
+from synapse.rest.client import account, directory, login, profile, register, room, sync
from synapse.server import HomeServer
from synapse.types import JsonDict, RoomAlias, UserID, create_requester
from synapse.util import Clock
@@ -49,7 +51,10 @@ from synapse.util.stringutils import random_string
from tests import unittest
from tests.http.server._base import make_request_with_cancellation_test
+from tests.storage.test_stream import PaginationTestCase
from tests.test_utils import make_awaitable
+from tests.test_utils.event_injection import create_event
+from tests.unittest import override_config
PATH_PREFIX = b"/_matrix/client/api/v1"
@@ -710,7 +715,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(44, channel.resource_usage.db_txn_count)
+ self.assertEqual(33, channel.resource_usage.db_txn_count)
def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
@@ -723,7 +728,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(50, channel.resource_usage.db_txn_count)
+ self.assertEqual(36, channel.resource_usage.db_txn_count)
def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id
@@ -867,6 +872,41 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
self.assertEqual(join_mock.call_count, 0)
+ def _create_basic_room(self) -> Tuple[int, object]:
+ """
+ Tries to create a basic room and returns the response code and JSON body.
+ """
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ {},
+ )
+ return channel.code, channel.json_body
+
+ @override_config(
+ {
+ "rc_message": {"per_second": 0.2, "burst_count": 10},
+ }
+ )
+ def test_room_creation_ratelimiting(self) -> None:
+ """
+ Regression test for #14312, where ratelimiting was made too strict.
+ Clients should be able to create 10 rooms in a row
+ without hitting rate limits, using default rate limit config.
+ (We override rate limiting config back to its default value.)
+
+ To ensure we don't make ratelimiting too generous accidentally,
+ also check that we can't create an 11th room.
+ """
+
+ for _ in range(10):
+ code, json_body = self._create_basic_room()
+ self.assertEqual(code, HTTPStatus.OK, json_body)
+
+ # The 11th room hits the rate limit.
+ code, json_body = self._create_basic_room()
+ self.assertEqual(code, HTTPStatus.TOO_MANY_REQUESTS, json_body)
+
class RoomTopicTestCase(RoomBase):
"""Tests /rooms/$room_id/topic REST events."""
@@ -1252,6 +1292,120 @@ class RoomJoinTestCase(RoomBase):
)
+class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ room.register_servlets,
+ synapse.rest.admin.register_servlets,
+ register.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.appservice_user, _ = self.register_appservice_user(
+ "as_user_potato", self.appservice.token
+ )
+
+ # Create a room as the appservice user.
+ args = {
+ "access_token": self.appservice.token,
+ "user_id": self.appservice_user,
+ }
+ channel = self.make_request(
+ "POST",
+ f"/_matrix/client/r0/createRoom?{urlparse.urlencode(args)}",
+ content={"visibility": "public"},
+ )
+
+ assert channel.code == 200
+ self.room = channel.json_body["room_id"]
+
+ self.main_store = self.hs.get_datastores().main
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+
+ self.appservice = ApplicationService(
+ token="i_am_an_app_service",
+ id="1234",
+ namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
+ # Note: this user does not have to match the regex above
+ sender="@as_main:test",
+ )
+
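+ # Patch the appservice loader so the test homeserver picks up our in-memory appservice.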
+ mock_load_appservices = Mock(return_value=[self.appservice])
+ with patch(
+ "synapse.storage.databases.main.appservice.load_appservices",
+ mock_load_appservices,
+ ):
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def test_send_event_ts(self) -> None:
+ """Test sending a non-state event with a custom timestamp."""
+ ts = 1
+
+ url_params = {
+ "user_id": self.appservice_user,
+ "ts": ts,
+ }
+ channel = self.make_request(
+ "PUT",
+ path=f"/_matrix/client/r0/rooms/{self.room}/send/m.room.message/1234?"
+ + urlparse.urlencode(url_params),
+ content={"body": "test", "msgtype": "m.text"},
+ access_token=self.appservice.token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ event_id = channel.json_body["event_id"]
+
+ # Ensure the event was persisted with the correct timestamp.
+ res = self.get_success(self.main_store.get_event(event_id))
+ self.assertEquals(ts, res.origin_server_ts)
+
+ def test_send_state_event_ts(self) -> None:
+ """Test sending a state event with a custom timestamp."""
+ ts = 1
+
+ url_params = {
+ "user_id": self.appservice_user,
+ "ts": ts,
+ }
+ channel = self.make_request(
+ "PUT",
+ path=f"/_matrix/client/r0/rooms/{self.room}/state/m.room.name?"
+ + urlparse.urlencode(url_params),
+ content={"name": "test"},
+ access_token=self.appservice.token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ event_id = channel.json_body["event_id"]
+
+ # Ensure the event was persisted with the correct timestamp.
+ res = self.get_success(self.main_store.get_event(event_id))
+ self.assertEquals(ts, res.origin_server_ts)
+
+ def test_send_membership_event_ts(self) -> None:
+ """Test sending a membership event with a custom timestamp."""
+ ts = 1
+
+ url_params = {
+ "user_id": self.appservice_user,
+ "ts": ts,
+ }
+ channel = self.make_request(
+ "PUT",
+ path=f"/_matrix/client/r0/rooms/{self.room}/state/m.room.member/{self.appservice_user}?"
+ + urlparse.urlencode(url_params),
+ content={"membership": "join", "display_name": "test"},
+ access_token=self.appservice.token,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ event_id = channel.json_body["event_id"]
+
+ # Ensure the event was persisted with the correct timestamp.
+ res = self.get_success(self.main_store.get_event(event_id))
+ self.assertEquals(ts, res.origin_server_ts)
+
+
class RoomJoinRatelimitTestCase(RoomBase):
user_id = "@sid1:red"
@@ -1272,10 +1426,22 @@ class RoomJoinRatelimitTestCase(RoomBase):
)
def test_join_local_ratelimit(self) -> None:
"""Tests that local joins are actually rate-limited."""
- for _ in range(3):
- self.helper.create_room_as(self.user_id)
+ # Create 4 rooms
+ room_ids = [
+ self.helper.create_room_as(self.user_id, is_public=True) for _ in range(4)
+ ]
- self.helper.create_room_as(self.user_id, expect_code=429)
+ joiner_user_id = self.register_user("joiner", "secret")
+ # Now make a new user try to join some of them.
+
+ # The user can join 3 rooms
+ for room_id in room_ids[0:3]:
+ self.helper.join(room_id, joiner_user_id)
+
+ # But the user cannot join a 4th room
+ self.helper.join(
+ room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS
+ )
@unittest.override_config(
{"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
@@ -2098,14 +2264,17 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase):
)
def make_public_rooms_request(
- self, room_types: Union[List[Union[str, None]], None]
+ self,
+ room_types: Optional[List[Union[str, None]]],
+ instance_id: Optional[str] = None,
) -> Tuple[List[Dict[str, Any]], int]:
- channel = self.make_request(
- "POST",
- self.url,
- {"filter": {PublicRoomsFilterFields.ROOM_TYPES: room_types}},
- self.token,
- )
+ body: JsonDict = {"filter": {PublicRoomsFilterFields.ROOM_TYPES: room_types}}
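+ # Optionally scope the request to a specific third-party network instance; none is registered in these tests, so such requests are expected to return zero rooms.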
+ if instance_id:
+ body["third_party_instance_id"] = "test|test"
+
+ channel = self.make_request("POST", self.url, body, self.token)
+ self.assertEqual(channel.code, 200)
+
chunk = channel.json_body["chunk"]
count = channel.json_body["total_room_count_estimate"]
@@ -2115,31 +2284,49 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase):
def test_returns_both_rooms_and_spaces_if_no_filter(self) -> None:
chunk, count = self.make_public_rooms_request(None)
-
self.assertEqual(count, 2)
+ # Also check that the endpoint works when the body contains no filter property at all.
+ channel = self.make_request("POST", self.url, {}, self.token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["chunk"]), 2)
+ self.assertEqual(channel.json_body["total_room_count_estimate"], 2)
+
+ chunk, count = self.make_public_rooms_request(None, "test|test")
+ self.assertEqual(count, 0)
+
def test_returns_only_rooms_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request([None])
self.assertEqual(count, 1)
self.assertEqual(chunk[0].get("room_type", None), None)
+ chunk, count = self.make_public_rooms_request([None], "test|test")
+ self.assertEqual(count, 0)
+
def test_returns_only_space_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request(["m.space"])
self.assertEqual(count, 1)
self.assertEqual(chunk[0].get("room_type", None), "m.space")
+ chunk, count = self.make_public_rooms_request(["m.space"], "test|test")
+ self.assertEqual(count, 0)
+
def test_returns_both_rooms_and_space_based_on_filter(self) -> None:
chunk, count = self.make_public_rooms_request(["m.space", None])
-
self.assertEqual(count, 2)
+ chunk, count = self.make_public_rooms_request(["m.space", None], "test|test")
+ self.assertEqual(count, 0)
+
def test_returns_both_rooms_and_spaces_if_array_is_empty(self) -> None:
chunk, count = self.make_public_rooms_request([])
-
self.assertEqual(count, 2)
+ chunk, count = self.make_public_rooms_request([], "test|test")
+ self.assertEqual(count, 0)
+
class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase):
"""Test that we correctly fallback to local filtering if a remote server
@@ -2779,149 +2966,20 @@ class LabelsTestCase(unittest.HomeserverTestCase):
return event_id
-class RelationsTestCase(unittest.HomeserverTestCase):
- servlets = [
- synapse.rest.admin.register_servlets_for_client_rest_resource,
- room.register_servlets,
- login.register_servlets,
- ]
-
- def default_config(self) -> Dict[str, Any]:
- config = super().default_config()
- config["experimental_features"] = {"msc3440_enabled": True}
- return config
-
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.user_id = self.register_user("test", "test")
- self.tok = self.login("test", "test")
- self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
-
- self.second_user_id = self.register_user("second", "test")
- self.second_tok = self.login("second", "test")
- self.helper.join(
- room=self.room_id, user=self.second_user_id, tok=self.second_tok
- )
-
- self.third_user_id = self.register_user("third", "test")
- self.third_tok = self.login("third", "test")
- self.helper.join(room=self.room_id, user=self.third_user_id, tok=self.third_tok)
-
- # An initial event with a relation from second user.
- res = self.helper.send_event(
- room_id=self.room_id,
- type=EventTypes.Message,
- content={"msgtype": "m.text", "body": "Message 1"},
- tok=self.tok,
- )
- self.event_id_1 = res["event_id"]
- self.helper.send_event(
- room_id=self.room_id,
- type="m.reaction",
- content={
- "m.relates_to": {
- "rel_type": RelationTypes.ANNOTATION,
- "event_id": self.event_id_1,
- "key": "👍",
- }
- },
- tok=self.second_tok,
- )
-
- # Another event with a relation from third user.
- res = self.helper.send_event(
- room_id=self.room_id,
- type=EventTypes.Message,
- content={"msgtype": "m.text", "body": "Message 2"},
- tok=self.tok,
- )
- self.event_id_2 = res["event_id"]
- self.helper.send_event(
- room_id=self.room_id,
- type="m.reaction",
- content={
- "m.relates_to": {
- "rel_type": RelationTypes.REFERENCE,
- "event_id": self.event_id_2,
- }
- },
- tok=self.third_tok,
- )
-
- # An event with no relations.
- self.helper.send_event(
- room_id=self.room_id,
- type=EventTypes.Message,
- content={"msgtype": "m.text", "body": "No relations"},
- tok=self.tok,
- )
-
- def _filter_messages(self, filter: JsonDict) -> List[JsonDict]:
+class RelationsTestCase(PaginationTestCase):
+ def _filter_messages(self, filter: JsonDict) -> List[str]:
"""Make a request to /messages with a filter, returns the chunk of events."""
+ from_token = self.get_success(
+ self.from_token.to_string(self.hs.get_datastores().main)
+ )
channel = self.make_request(
"GET",
- "/rooms/%s/messages?filter=%s&dir=b" % (self.room_id, json.dumps(filter)),
+ f"/rooms/{self.room_id}/messages?filter={json.dumps(filter)}&dir=f&from={from_token}",
access_token=self.tok,
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- return channel.json_body["chunk"]
-
- def test_filter_relation_senders(self) -> None:
- # Messages which second user reacted to.
- filter = {"related_by_senders": [self.second_user_id]}
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0]["event_id"], self.event_id_1)
-
- # Messages which third user reacted to.
- filter = {"related_by_senders": [self.third_user_id]}
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0]["event_id"], self.event_id_2)
-
- # Messages which either user reacted to.
- filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2]
- )
-
- def test_filter_relation_type(self) -> None:
- # Messages which have annotations.
- filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0]["event_id"], self.event_id_1)
-
- # Messages which have references.
- filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0]["event_id"], self.event_id_2)
-
- # Messages which have either annotations or references.
- filter = {
- "related_by_rel_types": [
- RelationTypes.ANNOTATION,
- RelationTypes.REFERENCE,
- ]
- }
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2]
- )
-
- def test_filter_relation_senders_and_type(self) -> None:
- # Messages which second user reacted to.
- filter = {
- "related_by_senders": [self.second_user_id],
- "related_by_rel_types": [RelationTypes.ANNOTATION],
- }
- chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0]["event_id"], self.event_id_1)
+ return [ev["event_id"] for ev in channel.json_body["chunk"]]
class ContextTestCase(unittest.HomeserverTestCase):
@@ -3461,3 +3519,83 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
# Also check that it stopped before calling _make_and_store_3pid_invite.
make_invite_mock.assert_called_once()
+
+ def test_400_missing_param_without_id_access_token(self) -> None:
+ """
+ Test that a 3pid invite request returns 400 M_MISSING_PARAM
+ if we do not include id_access_token.
+ """
+ channel = self.make_request(
+ method="POST",
+ path="/rooms/" + self.room_id + "/invite",
+ content={
+ "id_server": "example.com",
+ "medium": "email",
+ "address": "teresa@example.com",
+ },
+ access_token=self.tok,
+ )
+ self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.json_body["errcode"], "M_MISSING_PARAM")
+
+
+class TimestampLookupTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = {"msc3030_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self._storage_controllers = self.hs.get_storage_controllers()
+
+ self.room_owner = self.register_user("room_owner", "test")
+ self.room_owner_tok = self.login("room_owner", "test")
+
+ def _inject_outlier(self, room_id: str) -> EventBase:
+ event, _context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=room_id,
+ type="m.test",
+ sender="@test_remote_user:remote",
+ )
+ )
+
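+ # Mark the event as an outlier and persist it with an outlier context, as if it had been fetched without its surrounding state.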
+ event.internal_metadata.outlier = True
+ self.get_success(
+ self._storage_controllers.persistence.persist_event(
+ event, EventContext.for_outlier(self._storage_controllers)
+ )
+ )
+ return event
+
+ def test_no_outliers(self) -> None:
+ """
+ Test to make sure `/timestamp_to_event` does not return `outlier` events.
+ We're unable to determine whether an `outlier` is next to a gap, so we
+ don't know whether it's actually the closest event. Instead, let's just
+ ignore `outliers` with this endpoint.
+
+ This test is really seeing that we choose the non-`outlier` event behind the
+ `outlier`. Since the gap checking logic considers the latest message in the room
+ as *not* next to a gap, asking over federation does not come into play here.
+ """
+ room_id = self.helper.create_room_as(self.room_owner, tok=self.room_owner_tok)
+
+ outlier_event = self._inject_outlier(room_id)
+
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/unstable/org.matrix.msc3030/rooms/{room_id}/timestamp_to_event?dir=b&ts={outlier_event.origin_server_ts}",
+ access_token=self.room_owner_tok,
+ )
+ self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+
+ # Make sure the outlier event is not returned
+ self.assertNotEqual(channel.json_body["event_id"], outlier_event.event_id)
diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py
index d9bd8c4a28..c807a37bc2 100644
--- a/tests/rest/client/test_shadow_banned.py
+++ b/tests/rest/client/test_shadow_banned.py
@@ -26,7 +26,7 @@ from synapse.rest.client import (
room_upgrade_rest_servlet,
)
from synapse.server import HomeServer
-from synapse.types import UserID
+from synapse.types import UserID, create_requester
from synapse.util import Clock
from tests import unittest
@@ -97,7 +97,12 @@ class RoomTestCase(_ShadowBannedBase):
channel = self.make_request(
"POST",
"/rooms/%s/invite" % (room_id,),
- {"id_server": "test", "medium": "email", "address": "test@test.test"},
+ {
+ "id_server": "test",
+ "medium": "email",
+ "address": "test@test.test",
+ "id_access_token": "anytoken",
+ },
access_token=self.banned_access_token,
)
self.assertEqual(200, channel.code, channel.result)
@@ -275,7 +280,7 @@ class ProfileTestCase(_ShadowBannedBase):
message_handler = self.hs.get_message_handler()
event = self.get_success(
message_handler.get_room_data(
- self.banned_user_id,
+ create_requester(self.banned_user_id),
room_id,
"m.room.member",
self.banned_user_id,
@@ -310,7 +315,7 @@ class ProfileTestCase(_ShadowBannedBase):
message_handler = self.hs.get_message_handler()
event = self.get_success(
message_handler.get_room_data(
- self.banned_user_id,
+ create_requester(self.banned_user_id),
room_id,
"m.room.member",
self.banned_user_id,
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index b085c50356..0af643ecd9 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -38,7 +38,6 @@ from tests.federation.transport.test_knocking import (
KnockingStrippedStateEventHelperMixin,
)
from tests.server import TimedOutException
-from tests.unittest import override_config
class FilterTestCase(unittest.HomeserverTestCase):
@@ -390,6 +389,11 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase):
sync.register_servlets,
]
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+
+ return self.setup_test_homeserver(config=config)
+
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.url = "/sync?since=%s"
self.next_batch = "s0"
@@ -408,7 +412,6 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase):
# Join the second user
self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)
- @override_config({"experimental_features": {"msc2285_enabled": True}})
def test_private_read_receipts(self) -> None:
# Send a message as the first user
res = self.helper.send(self.room_id, body="hello", tok=self.tok)
@@ -416,7 +419,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase):
# Send a private read receipt to tell the server the first user's message was read
channel = self.make_request(
"POST",
- f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}",
+ f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}",
{},
access_token=self.tok2,
)
@@ -425,7 +428,6 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase):
# Test that the first user can't see the other user's private read receipt
self.assertIsNone(self._get_read_receipt())
- @override_config({"experimental_features": {"msc2285_enabled": True}})
def test_public_receipt_can_override_private(self) -> None:
"""
Sending a public read receipt to the same event which has a private read
@@ -456,7 +458,6 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase):
# Test that we did override the private read receipt
self.assertNotEqual(self._get_read_receipt(), None)
- @override_config({"experimental_features": {"msc2285_enabled": True}})
def test_private_receipt_cannot_override_public(self) -> None:
"""
Sending a private read receipt to the same event which has a public read
@@ -543,7 +544,6 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
config = super().default_config()
config["experimental_features"] = {
"msc2654_enabled": True,
- "msc2285_enabled": True,
}
return config
@@ -624,7 +624,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
# Send a read receipt to tell the server we've read the latest event.
channel = self.make_request(
"POST",
- f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res['event_id']}",
+ f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}",
{},
access_token=self.tok,
)
@@ -700,7 +700,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
self._check_unread_count(5)
res2 = self.helper.send(self.room_id, "hello", tok=self.tok2)
- # Make sure both m.read and org.matrix.msc2285.read.private advance
+ # Make sure both m.read and m.read.private advance
channel = self.make_request(
"POST",
f"/rooms/{self.room_id}/receipt/m.read/{res1['event_id']}",
@@ -712,16 +712,21 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res2['event_id']}",
+ f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}",
{},
access_token=self.tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
self._check_unread_count(0)
- # We test for both receipt types that influence notification counts
- @parameterized.expand([ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE])
- def test_read_receipts_only_go_down(self, receipt_type: ReceiptTypes) -> None:
+ # We test for both receipt types that influence notification counts
+ @parameterized.expand(
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ]
+ )
+ def test_read_receipts_only_go_down(self, receipt_type: str) -> None:
# Join the new user
self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2)
@@ -732,18 +737,18 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase):
# Read last event
channel = self.make_request(
"POST",
- f"/rooms/{self.room_id}/receipt/{receipt_type}/{res2['event_id']}",
+ f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res2['event_id']}",
{},
access_token=self.tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
self._check_unread_count(0)
- # Make sure neither m.read nor org.matrix.msc2285.read.private make the
+ # Make sure neither m.read nor m.read.private make the
# read receipt go up to an older event
channel = self.make_request(
"POST",
- f"/rooms/{self.room_id}/receipt/org.matrix.msc2285.read.private/{res1['event_id']}",
+ f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res1['event_id']}",
{},
access_token=self.tok,
)
@@ -948,3 +953,24 @@ class ExcludeRoomTestCase(unittest.HomeserverTestCase):
self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["invite"])
self.assertIn(self.included_room_id, channel.json_body["rooms"]["invite"])
+
+ def test_incremental_sync(self) -> None:
+ """Tests that activity in the room is properly filtered out of incremental
+ syncs.
+ """
+ channel = self.make_request("GET", "/sync", access_token=self.tok)
+ self.assertEqual(channel.code, 200, channel.result)
+ next_batch = channel.json_body["next_batch"]
+
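+ # Generate new activity in both rooms after the initial sync.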
+ self.helper.send(self.excluded_room_id, tok=self.tok)
+ self.helper.send(self.included_room_id, tok=self.tok)
+
+ channel = self.make_request(
+ "GET",
+ f"/sync?since={next_batch}",
+ access_token=self.tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ self.assertNotIn(self.excluded_room_id, channel.json_body["rooms"]["join"])
+ self.assertIn(self.included_room_id, channel.json_body["rooms"]["join"])
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 1083391b41..2e1b4753dc 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -156,7 +156,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
{},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
callback.assert_called_once()
@@ -174,7 +174,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
{},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"403", channel.result)
+ self.assertEqual(channel.code, 403, channel.result)
def test_third_party_rules_workaround_synapse_errors_pass_through(self) -> None:
"""
@@ -212,7 +212,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
access_token=self.tok,
)
# Check the error code
- self.assertEqual(channel.result["code"], b"429", channel.result)
+ self.assertEqual(channel.code, 429, channel.result)
# Check the JSON body has had the `nasty` key injected
self.assertEqual(
channel.json_body,
@@ -261,7 +261,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
{"x": "x"},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
event_id = channel.json_body["event_id"]
# ... and check that it got modified
@@ -270,7 +270,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["x"], "y")
@@ -299,7 +299,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
orig_event_id = channel.json_body["event_id"]
channel = self.make_request(
@@ -316,7 +316,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
edited_event_id = channel.json_body["event_id"]
# ... and check that they both got modified
@@ -325,7 +325,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, orig_event_id),
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "ORIGINAL BODY")
@@ -334,7 +334,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, edited_event_id),
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
ev = channel.json_body
self.assertEqual(ev["content"]["body"], "EDITED BODY")
@@ -380,7 +380,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
},
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
event_id = channel.json_body["event_id"]
@@ -389,7 +389,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
access_token=self.tok,
)
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
self.assertIn("foo", channel.json_body["content"].keys())
self.assertEqual(channel.json_body["content"]["foo"], "bar")
diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py
index 61b66d7685..fdc433a8b5 100644
--- a/tests/rest/client/test_typing.py
+++ b/tests/rest/client/test_typing.py
@@ -59,7 +59,8 @@ class RoomTypingTestCase(unittest.HomeserverTestCase):
self.event_source.get_new_events(
user=UserID.from_string(self.user_id),
from_key=0,
- limit=None,
+ # Limit is unused.
+ limit=0,
room_ids=[self.room_id],
is_guest=False,
)
diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index 105d418698..8d6f2b6ff9 100644
--- a/tests/rest/client/utils.py
+++ b/tests/rest/client/utils.py
@@ -31,7 +31,6 @@ from typing import (
Tuple,
overload,
)
-from unittest.mock import patch
from urllib.parse import urlencode
import attr
@@ -46,8 +45,19 @@ from synapse.server import HomeServer
from synapse.types import JsonDict
from tests.server import FakeChannel, FakeSite, make_request
-from tests.test_utils import FakeResponse
from tests.test_utils.html_parsers import TestHtmlParser
+from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer
+
+# an 'oidc_config' suitable for login_via_oidc.
+TEST_OIDC_ISSUER = "https://issuer.test/"
+TEST_OIDC_CONFIG = {
+ "enabled": True,
+ "issuer": TEST_OIDC_ISSUER,
+ "client_id": "test-client-id",
+ "client_secret": "test-client-secret",
+ "scopes": ["openid"],
+ "user_mapping_provider": {"config": {"localpart_template": "{{ user.sub }}"}},
+}
@attr.s(auto_attribs=True)
@@ -140,7 +150,7 @@ class RestHelper:
custom_headers=custom_headers,
)
- assert channel.result["code"] == b"%d" % expect_code, channel.result
+ assert channel.code == expect_code, channel.result
self.auth_user_id = temp_id
if expect_code == HTTPStatus.OK:
@@ -213,11 +223,9 @@ class RestHelper:
data,
)
- assert (
- int(channel.result["code"]) == expect_code
- ), "Expected: %d, got: %d, resp: %r" % (
+ assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
expect_code,
- int(channel.result["code"]),
+ channel.code,
channel.result["body"],
)
@@ -312,11 +320,9 @@ class RestHelper:
data,
)
- assert (
- int(channel.result["code"]) == expect_code
- ), "Expected: %d, got: %d, resp: %r" % (
+ assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
expect_code,
- int(channel.result["code"]),
+ channel.code,
channel.result["body"],
)
@@ -396,11 +402,46 @@ class RestHelper:
custom_headers=custom_headers,
)
- assert (
- int(channel.result["code"]) == expect_code
- ), "Expected: %d, got: %d, resp: %r" % (
+ assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
expect_code,
- int(channel.result["code"]),
+ channel.code,
+ channel.result["body"],
+ )
+
+ return channel.json_body
+
+ def get_event(
+ self,
+ room_id: str,
+ event_id: str,
+ tok: Optional[str] = None,
+ expect_code: int = HTTPStatus.OK,
+ ) -> JsonDict:
+ """Request a specific event from the server.
+
+ Args:
+ room_id: the room in which the event was sent.
+ event_id: the event's ID.
+ tok: the token to request the event with.
+ expect_code: the expected HTTP status for the response.
+
+ Returns:
+ The event as a dict.
+ """
+ path = f"/_matrix/client/v3/rooms/{room_id}/event/{event_id}"
+ if tok:
+ path = path + f"?access_token={tok}"
+
+ channel = make_request(
+ self.hs.get_reactor(),
+ self.site,
+ "GET",
+ path,
+ )
+
+ assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
+ expect_code,
+ channel.code,
channel.result["body"],
)
@@ -449,11 +490,9 @@ class RestHelper:
channel = make_request(self.hs.get_reactor(), self.site, method, path, content)
- assert (
- int(channel.result["code"]) == expect_code
- ), "Expected: %d, got: %d, resp: %r" % (
+ assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
expect_code,
- int(channel.result["code"]),
+ channel.code,
channel.result["body"],
)
@@ -545,13 +584,62 @@ class RestHelper:
assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % (
expect_code,
- int(channel.result["code"]),
+ channel.code,
channel.result["body"],
)
return channel.json_body
- def login_via_oidc(self, remote_user_id: str) -> JsonDict:
+ def whoami(
+ self,
+ access_token: str,
+ expect_code: Literal[HTTPStatus.OK, HTTPStatus.UNAUTHORIZED] = HTTPStatus.OK,
+ ) -> JsonDict:
+ """Perform a 'whoami' request, which can be a quick way to check for access
+ token validity.
+
+ Args:
+ access_token: The user token to use during the request
+ expect_code: The return code to expect from attempting the whoami request
+ """
+ channel = make_request(
+ self.hs.get_reactor(),
+ self.site,
+ "GET",
+ "account/whoami",
+ access_token=access_token,
+ )
+
+ assert channel.code == expect_code, "Exepcted: %d, got %d, resp: %r" % (
+ expect_code,
+ channel.code,
+ channel.result["body"],
+ )
+
+ return channel.json_body
+
+ def fake_oidc_server(self, issuer: str = TEST_OIDC_ISSUER) -> FakeOidcServer:
+ """Create a ``FakeOidcServer``.
+
+ This can be used in conjunction with ``login_via_oidc``::
+
+ fake_oidc_server = self.helper.fake_oidc_server()
+ login_data, _ = self.helper.login_via_oidc(fake_oidc_server, "user")
+ """
+
+ return FakeOidcServer(
+ clock=self.hs.get_clock(),
+ issuer=issuer,
+ )
+
+ def login_via_oidc(
+ self,
+ fake_server: FakeOidcServer,
+ remote_user_id: str,
+ with_sid: bool = False,
+ idp_id: Optional[str] = None,
+ expected_status: int = 200,
+ ) -> Tuple[JsonDict, FakeAuthorizationGrant]:
"""Log in (as a new user) via OIDC
Returns the result of the final token login.
@@ -564,7 +652,14 @@ class RestHelper:
the normal places.
"""
client_redirect_url = "https://x"
- channel = self.auth_via_oidc({"sub": remote_user_id}, client_redirect_url)
+ userinfo = {"sub": remote_user_id}
+ channel, grant = self.auth_via_oidc(
+ fake_server,
+ userinfo,
+ client_redirect_url,
+ with_sid=with_sid,
+ idp_id=idp_id,
+ )
# expect a confirmation page
assert channel.code == HTTPStatus.OK, channel.result
@@ -586,15 +681,20 @@ class RestHelper:
"/login",
content={"type": "m.login.token", "token": login_token},
)
- assert channel.code == HTTPStatus.OK
- return channel.json_body
+ assert (
+ channel.code == expected_status
+ ), f"unexpected status in response: {channel.code}"
+ return channel.json_body, grant
def auth_via_oidc(
self,
+ fake_server: FakeOidcServer,
user_info_dict: JsonDict,
client_redirect_url: Optional[str] = None,
ui_auth_session_id: Optional[str] = None,
- ) -> FakeChannel:
+ with_sid: bool = False,
+ idp_id: Optional[str] = None,
+ ) -> Tuple[FakeChannel, FakeAuthorizationGrant]:
"""Perform an OIDC authentication flow via a mock OIDC provider.
This can be used for either login or user-interactive auth.
@@ -618,6 +718,8 @@ class RestHelper:
the login redirect endpoint
ui_auth_session_id: if set, we will perform a UI Auth flow. The session id
of the UI auth.
+ with_sid: if True, generates a random `sid` (OIDC session ID)
+ idp_id: if set, explicitly chooses one specific IDP
Returns:
A FakeChannel containing the result of calling the OIDC callback endpoint.
@@ -627,14 +729,17 @@ class RestHelper:
cookies: Dict[str, str] = {}
- # if we're doing a ui auth, hit the ui auth redirect endpoint
- if ui_auth_session_id:
- # can't set the client redirect url for UI Auth
- assert client_redirect_url is None
- oauth_uri = self.initiate_sso_ui_auth(ui_auth_session_id, cookies)
- else:
- # otherwise, hit the login redirect endpoint
- oauth_uri = self.initiate_sso_login(client_redirect_url, cookies)
+ with fake_server.patch_homeserver(hs=self.hs):
+ # if we're doing a ui auth, hit the ui auth redirect endpoint
+ if ui_auth_session_id:
+ # can't set the client redirect url for UI Auth
+ assert client_redirect_url is None
+ oauth_uri = self.initiate_sso_ui_auth(ui_auth_session_id, cookies)
+ else:
+ # otherwise, hit the login redirect endpoint
+ oauth_uri = self.initiate_sso_login(
+ client_redirect_url, cookies, idp_id=idp_id
+ )
# we now have a URI for the OIDC IdP, but we skip that and go straight
# back to synapse's OIDC callback resource. However, we do need the "state"
@@ -642,17 +747,21 @@ class RestHelper:
# that synapse passes to the client.
oauth_uri_path, _ = oauth_uri.split("?", 1)
- assert oauth_uri_path == TEST_OIDC_AUTH_ENDPOINT, (
+ assert oauth_uri_path == fake_server.authorization_endpoint, (
"unexpected SSO URI " + oauth_uri_path
)
- return self.complete_oidc_auth(oauth_uri, cookies, user_info_dict)
+ return self.complete_oidc_auth(
+ fake_server, oauth_uri, cookies, user_info_dict, with_sid=with_sid
+ )
def complete_oidc_auth(
self,
+ fake_server: FakeOidcServer,
oauth_uri: str,
cookies: Mapping[str, str],
user_info_dict: JsonDict,
- ) -> FakeChannel:
+ with_sid: bool = False,
+ ) -> Tuple[FakeChannel, FakeAuthorizationGrant]:
"""Mock out an OIDC authentication flow
Assumes that an OIDC auth has been initiated by one of initiate_sso_login or
@@ -663,50 +772,37 @@ class RestHelper:
Requires the OIDC callback resource to be mounted at the normal place.
Args:
+ fake_server: the fake OIDC server with which the auth should be done
oauth_uri: the OIDC URI returned by synapse's redirect endpoint (ie,
from initiate_sso_login or initiate_sso_ui_auth).
cookies: the cookies set by synapse's redirect endpoint, which will be
sent back to the callback endpoint.
user_info_dict: the remote userinfo that the OIDC provider should present.
Typically this should be '{"sub": "<remote user id>"}'.
+ with_sid: if True, generates a random `sid` (OIDC session ID)
Returns:
A FakeChannel containing the result of calling the OIDC callback endpoint.
"""
_, oauth_uri_qs = oauth_uri.split("?", 1)
params = urllib.parse.parse_qs(oauth_uri_qs)
+
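+ # Ask the fake IdP to start an authorization: this records a grant and returns the auth code that would normally come back via the redirect.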
+ code, grant = fake_server.start_authorization(
+ scope=params["scope"][0],
+ userinfo=user_info_dict,
+ client_id=params["client_id"][0],
+ redirect_uri=params["redirect_uri"][0],
+ nonce=params["nonce"][0],
+ with_sid=with_sid,
+ )
+ state = params["state"][0]
+
callback_uri = "%s?%s" % (
urllib.parse.urlparse(params["redirect_uri"][0]).path,
- urllib.parse.urlencode({"state": params["state"][0], "code": "TEST_CODE"}),
- )
-
- # before we hit the callback uri, stub out some methods in the http client so
- # that we don't have to handle full HTTPS requests.
- # (expected url, json response) pairs, in the order we expect them.
- expected_requests = [
- # first we get a hit to the token endpoint, which we tell to return
- # a dummy OIDC access token
- (TEST_OIDC_TOKEN_ENDPOINT, {"access_token": "TEST"}),
- # and then one to the user_info endpoint, which returns our remote user id.
- (TEST_OIDC_USERINFO_ENDPOINT, user_info_dict),
- ]
-
- async def mock_req(
- method: str,
- uri: str,
- data: Optional[dict] = None,
- headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None,
- ):
- (expected_uri, resp_obj) = expected_requests.pop(0)
- assert uri == expected_uri
- resp = FakeResponse(
- code=HTTPStatus.OK,
- phrase=b"OK",
- body=json.dumps(resp_obj).encode("utf-8"),
- )
- return resp
+ urllib.parse.urlencode({"state": state, "code": code}),
+ )
- with patch.object(self.hs.get_proxied_http_client(), "request", mock_req):
+ with fake_server.patch_homeserver(hs=self.hs):
# now hit the callback URI with the right params and a made-up code
channel = make_request(
self.hs.get_reactor(),
@@ -717,10 +813,13 @@ class RestHelper:
("Cookie", "%s=%s" % (k, v)) for (k, v) in cookies.items()
],
)
- return channel
+ return channel, grant
def initiate_sso_login(
- self, client_redirect_url: Optional[str], cookies: MutableMapping[str, str]
+ self,
+ client_redirect_url: Optional[str],
+ cookies: MutableMapping[str, str],
+ idp_id: Optional[str] = None,
) -> str:
"""Make a request to the login-via-sso redirect endpoint, and return the target
@@ -731,6 +830,7 @@ class RestHelper:
client_redirect_url: the client redirect URL to pass to the login redirect
endpoint
cookies: any cookies returned will be added to this dict
+ idp_id: if set, explicitly chooses one specific IDP
Returns:
the URI that the client gets redirected to (ie, the SSO server)
@@ -739,6 +839,12 @@ class RestHelper:
if client_redirect_url:
params["redirectUrl"] = client_redirect_url
+ uri = "/_matrix/client/r0/login/sso/redirect"
+ if idp_id is not None:
+ uri = f"{uri}/{idp_id}"
+
+ uri = f"{uri}?{urllib.parse.urlencode(params)}"
+
# hit the redirect url (which should redirect back to the redirect url. This
# is the easiest way of figuring out what the Host header ought to be set to
# to keep Synapse happy.
@@ -746,7 +852,7 @@ class RestHelper:
self.hs.get_reactor(),
self.site,
"GET",
- "/_matrix/client/r0/login/sso/redirect?" + urllib.parse.urlencode(params),
+ uri,
)
assert channel.code == 302
@@ -808,21 +914,3 @@ class RestHelper:
assert len(p.links) == 1, "not exactly one link in confirmation page"
oauth_uri = p.links[0]
return oauth_uri
-
-
-# an 'oidc_config' suitable for login_via_oidc.
-TEST_OIDC_AUTH_ENDPOINT = "https://issuer.test/auth"
-TEST_OIDC_TOKEN_ENDPOINT = "https://issuer.test/token"
-TEST_OIDC_USERINFO_ENDPOINT = "https://issuer.test/userinfo"
-TEST_OIDC_CONFIG = {
- "enabled": True,
- "discover": False,
- "issuer": "https://issuer.test",
- "client_id": "test-client-id",
- "client_secret": "test-client-secret",
- "scopes": ["profile"],
- "authorization_endpoint": TEST_OIDC_AUTH_ENDPOINT,
- "token_endpoint": TEST_OIDC_TOKEN_ENDPOINT,
- "userinfo_endpoint": TEST_OIDC_USERINFO_ENDPOINT,
- "user_mapping_provider": {"config": {"localpart_template": "{{ user.sub }}"}},
-}
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index ac0ac06b7e..7f1fba1086 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -26,7 +26,7 @@ from twisted.web.resource import NoResource, Resource
from synapse.crypto.keyring import PerspectivesKeyFetcher
from synapse.http.site import SynapseRequest
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.server import HomeServer
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict
@@ -46,7 +46,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
def create_test_resource(self) -> Resource:
return create_resource_tree(
- {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource()
+ {"/_matrix/key/v2": KeyResource(self.hs)}, root_resource=NoResource()
)
def expect_outgoing_key_request(
diff --git a/tests/rest/media/test_media_retention.py b/tests/rest/media/test_media_retention.py
index 14af07c5af..23f227aed6 100644
--- a/tests/rest/media/test_media_retention.py
+++ b/tests/rest/media/test_media_retention.py
@@ -13,7 +13,9 @@
# limitations under the License.
import io
-from typing import Iterable, Optional, Tuple
+from typing import Iterable, Optional
+
+from matrix_common.types.mxc_uri import MXCUri
from twisted.test.proto_helpers import MemoryReactor
@@ -63,9 +65,9 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
last_accessed_ms: Optional[int],
is_quarantined: Optional[bool] = False,
is_protected: Optional[bool] = False,
- ) -> str:
+ ) -> MXCUri:
# "Upload" some media to the local media store
- mxc_uri = self.get_success(
+ mxc_uri: MXCUri = self.get_success(
media_repository.create_content(
media_type="text/plain",
upload_name=None,
@@ -75,13 +77,11 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
)
)
- media_id = mxc_uri.split("/")[-1]
-
# Set the last recently accessed time for this media
if last_accessed_ms is not None:
self.get_success(
self.store.update_cached_last_access_time(
- local_media=(media_id,),
+ local_media=(mxc_uri.media_id,),
remote_media=(),
time_ms=last_accessed_ms,
)
@@ -92,7 +92,7 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
self.get_success(
self.store.quarantine_media_by_id(
server_name=self.hs.config.server.server_name,
- media_id=media_id,
+ media_id=mxc_uri.media_id,
quarantined_by="@theadmin:test",
)
)
@@ -101,18 +101,18 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
# Mark this media as protected from quarantine
self.get_success(
self.store.mark_local_media_as_safe(
- media_id=media_id,
+ media_id=mxc_uri.media_id,
safe=True,
)
)
- return media_id
+ return mxc_uri
def _cache_remote_media_and_set_attributes(
media_id: str,
last_accessed_ms: Optional[int],
is_quarantined: Optional[bool] = False,
- ) -> str:
+ ) -> MXCUri:
# Pretend to cache some remote media
self.get_success(
self.store.store_cached_remote_media(
@@ -146,7 +146,7 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
)
)
- return media_id
+ return MXCUri(self.remote_server_name, media_id)
# Start with the local media store
self.local_recently_accessed_media = _create_media_and_set_attributes(
@@ -214,28 +214,16 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
# Remote media should be unaffected.
self._assert_if_mxc_uris_purged(
purged=[
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_media,
- ),
- (self.hs.config.server.server_name, self.local_never_accessed_media),
+ self.local_not_recently_accessed_media,
+ self.local_never_accessed_media,
],
not_purged=[
- (self.hs.config.server.server_name, self.local_recently_accessed_media),
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_quarantined_media,
- ),
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_protected_media,
- ),
- (self.remote_server_name, self.remote_recently_accessed_media),
- (self.remote_server_name, self.remote_not_recently_accessed_media),
- (
- self.remote_server_name,
- self.remote_not_recently_accessed_quarantined_media,
- ),
+ self.local_recently_accessed_media,
+ self.local_not_recently_accessed_quarantined_media,
+ self.local_not_recently_accessed_protected_media,
+ self.remote_recently_accessed_media,
+ self.remote_not_recently_accessed_media,
+ self.remote_not_recently_accessed_quarantined_media,
],
)
@@ -261,49 +249,35 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
# Remote media accessed <30 days ago should still exist.
self._assert_if_mxc_uris_purged(
purged=[
- (self.remote_server_name, self.remote_not_recently_accessed_media),
+ self.remote_not_recently_accessed_media,
],
not_purged=[
- (self.remote_server_name, self.remote_recently_accessed_media),
- (self.hs.config.server.server_name, self.local_recently_accessed_media),
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_media,
- ),
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_quarantined_media,
- ),
- (
- self.hs.config.server.server_name,
- self.local_not_recently_accessed_protected_media,
- ),
- (
- self.remote_server_name,
- self.remote_not_recently_accessed_quarantined_media,
- ),
- (self.hs.config.server.server_name, self.local_never_accessed_media),
+ self.remote_recently_accessed_media,
+ self.local_recently_accessed_media,
+ self.local_not_recently_accessed_media,
+ self.local_not_recently_accessed_quarantined_media,
+ self.local_not_recently_accessed_protected_media,
+ self.remote_not_recently_accessed_quarantined_media,
+ self.local_never_accessed_media,
],
)
def _assert_if_mxc_uris_purged(
- self, purged: Iterable[Tuple[str, str]], not_purged: Iterable[Tuple[str, str]]
+ self, purged: Iterable[MXCUri], not_purged: Iterable[MXCUri]
) -> None:
- def _assert_mxc_uri_purge_state(
- server_name: str, media_id: str, expect_purged: bool
- ) -> None:
+ def _assert_mxc_uri_purge_state(mxc_uri: MXCUri, expect_purged: bool) -> None:
"""Given an MXC URI, assert whether it has been purged or not."""
- if server_name == self.hs.config.server.server_name:
+ if mxc_uri.server_name == self.hs.config.server.server_name:
found_media_dict = self.get_success(
- self.store.get_local_media(media_id)
+ self.store.get_local_media(mxc_uri.media_id)
)
else:
found_media_dict = self.get_success(
- self.store.get_cached_remote_media(server_name, media_id)
+ self.store.get_cached_remote_media(
+ mxc_uri.server_name, mxc_uri.media_id
+ )
)
- mxc_uri = f"mxc://{server_name}/{media_id}"
-
if expect_purged:
self.assertIsNone(
found_media_dict, msg=f"{mxc_uri} unexpectedly not purged"
@@ -315,7 +289,7 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase):
)
# Assert that the given MXC URIs have either been correctly purged or not.
- for server_name, media_id in purged:
- _assert_mxc_uri_purge_state(server_name, media_id, expect_purged=True)
- for server_name, media_id in not_purged:
- _assert_mxc_uri_purge_state(server_name, media_id, expect_purged=False)
+ for mxc_uri in purged:
+ _assert_mxc_uri_purge_state(mxc_uri, expect_purged=True)
+ for mxc_uri in not_purged:
+ _assert_mxc_uri_purge_state(mxc_uri, expect_purged=False)
diff --git a/tests/rest/media/v1/test_oembed.py b/tests/rest/media/v1/test_oembed.py
index f38d7225f8..319ae8b1cc 100644
--- a/tests/rest/media/v1/test_oembed.py
+++ b/tests/rest/media/v1/test_oembed.py
@@ -14,6 +14,8 @@
import json
+from parameterized import parameterized
+
from twisted.test.proto_helpers import MemoryReactor
from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult
@@ -23,8 +25,16 @@ from synapse.util import Clock
from tests.unittest import HomeserverTestCase
+try:
+ import lxml
+except ImportError:
+ lxml = None
+
class OEmbedTests(HomeserverTestCase):
+ if not lxml:
+ skip = "url preview feature requires lxml"
+
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.oembed = OEmbedProvider(hs)
@@ -36,7 +46,7 @@ class OEmbedTests(HomeserverTestCase):
def test_version(self) -> None:
"""Accept versions that are similar to 1.0 as a string or int (or missing)."""
for version in ("1.0", 1.0, 1):
- result = self.parse_response({"version": version, "type": "link"})
+ result = self.parse_response({"version": version})
# An empty Open Graph response is an error, ensure the URL is included.
self.assertIn("og:url", result.open_graph_result)
@@ -49,3 +59,94 @@ class OEmbedTests(HomeserverTestCase):
result = self.parse_response({"version": version, "type": "link"})
# An empty Open Graph response is an error, ensure the URL is included.
self.assertEqual({}, result.open_graph_result)
+
+ def test_cache_age(self) -> None:
+ """Ensure a cache-age is parsed properly."""
+ # Correct-ish cache ages are allowed.
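+ # (A cache_age of 1 second is expected to be returned as 1000, i.e. in milliseconds.)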
+ for cache_age in ("1", 1.0, 1):
+ result = self.parse_response({"cache_age": cache_age})
+ self.assertEqual(result.cache_age, 1000)
+
+ # Invalid cache ages are ignored.
+ for cache_age in ("invalid", {}):
+ result = self.parse_response({"cache_age": cache_age})
+ self.assertIsNone(result.cache_age)
+
+ # Cache age is optional.
+ result = self.parse_response({})
+ self.assertIsNone(result.cache_age)
+
+ @parameterized.expand(
+ [
+ ("title", "title"),
+ ("provider_name", "site_name"),
+ ("thumbnail_url", "image"),
+ ],
+ name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}",
+ )
+ def test_property(self, oembed_property: str, open_graph_property: str) -> None:
+ """Test properties which must be strings."""
+ result = self.parse_response({oembed_property: "test"})
+ self.assertIn(f"og:{open_graph_property}", result.open_graph_result)
+ self.assertEqual(result.open_graph_result[f"og:{open_graph_property}"], "test")
+
+ result = self.parse_response({oembed_property: 1})
+ self.assertNotIn(f"og:{open_graph_property}", result.open_graph_result)
+
+ def test_author_name(self) -> None:
+ """Test the author_name property."""
+ result = self.parse_response({"author_name": "test"})
+ self.assertEqual(result.author_name, "test")
+
+ result = self.parse_response({"author_name": 1})
+ self.assertIsNone(result.author_name)
+
+ def test_rich(self) -> None:
+ """Test an oEmbed response of type "rich"."""
+ result = self.parse_response({"html": "test<img src='foo'>", "type": "rich"})
+ self.assertIn("og:description", result.open_graph_result)
+ self.assertIn("og:image", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:description"], "test")
+ self.assertEqual(result.open_graph_result["og:image"], "foo")
+
+ result = self.parse_response({"type": "rich"})
+ self.assertNotIn("og:description", result.open_graph_result)
+
+ result = self.parse_response({"html": 1, "type": "rich"})
+ self.assertNotIn("og:description", result.open_graph_result)
+
+ def test_photo(self) -> None:
+ """Test an oEmbed response of type "photo"."""
+ result = self.parse_response({"url": "test", "type": "photo"})
+ self.assertIn("og:image", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:image"], "test")
+
+ result = self.parse_response({"type": "photo"})
+ self.assertNotIn("og:image", result.open_graph_result)
+
+ result = self.parse_response({"url": 1, "type": "photo"})
+ self.assertNotIn("og:image", result.open_graph_result)
+
+ def test_video(self) -> None:
+ """Test an oEmbed response of type "video"."""
+ result = self.parse_response({"html": "test", "type": "video"})
+ self.assertIn("og:type", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:type"], "video.other")
+ self.assertIn("og:description", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:description"], "test")
+
+ result = self.parse_response({"type": "video"})
+ self.assertIn("og:type", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:type"], "video.other")
+ self.assertNotIn("og:description", result.open_graph_result)
+
+ result = self.parse_response({"url": 1, "type": "video"})
+ self.assertIn("og:type", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:type"], "video.other")
+ self.assertNotIn("og:description", result.open_graph_result)
+
+ def test_link(self) -> None:
+ """Test an oEmbed response of type "link"."""
+ result = self.parse_response({"type": "link"})
+ self.assertIn("og:type", result.open_graph_result)
+ self.assertEqual(result.open_graph_result["og:type"], "website")
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
index da325955f8..c0a2501742 100644
--- a/tests/rest/test_health.py
+++ b/tests/rest/test_health.py
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
-
from synapse.rest.health import HealthResource
from tests import unittest
@@ -26,5 +24,5 @@ class HealthCheckTests(unittest.HomeserverTestCase):
def test_health(self) -> None:
channel = self.make_request("GET", "/health", shorthand=False)
- self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertEqual(channel.code, 200)
self.assertEqual(channel.result["body"], b"OK")
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index d8faafec75..2091b08d89 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from http import HTTPStatus
-
from twisted.web.resource import Resource
from synapse.rest.well_known import well_known_resource
@@ -38,7 +36,7 @@ class WellKnownTests(unittest.HomeserverTestCase):
"GET", "/.well-known/matrix/client", shorthand=False
)
- self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertEqual(channel.code, 200)
self.assertEqual(
channel.json_body,
{
@@ -57,7 +55,7 @@ class WellKnownTests(unittest.HomeserverTestCase):
"GET", "/.well-known/matrix/client", shorthand=False
)
- self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
+ self.assertEqual(channel.code, 404)
@unittest.override_config(
{
@@ -71,7 +69,7 @@ class WellKnownTests(unittest.HomeserverTestCase):
"GET", "/.well-known/matrix/client", shorthand=False
)
- self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertEqual(channel.code, 200)
self.assertEqual(
channel.json_body,
{
@@ -87,7 +85,7 @@ class WellKnownTests(unittest.HomeserverTestCase):
"GET", "/.well-known/matrix/server", shorthand=False
)
- self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertEqual(channel.code, 200)
self.assertEqual(
channel.json_body,
{"m.server": "test:443"},
@@ -97,4 +95,4 @@ class WellKnownTests(unittest.HomeserverTestCase):
channel = self.make_request(
"GET", "/.well-known/matrix/server", shorthand=False
)
- self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
+ self.assertEqual(channel.code, 404)
diff --git a/tests/server.py b/tests/server.py
index 9689e6a0cd..b1730fcc8d 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -61,6 +61,10 @@ from twisted.web.resource import IResource
from twisted.web.server import Request, Site
from synapse.config.database import DatabaseConnectionConfig
+from synapse.events.presence_router import load_legacy_presence_router
+from synapse.events.spamcheck import load_legacy_spam_checkers
+from synapse.events.third_party_rules import load_legacy_third_party_event_rules
+from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseRequest
from synapse.logging.context import ContextResourceUsage
from synapse.server import HomeServer
@@ -262,7 +266,12 @@ class FakeSite:
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
- def __init__(self, resource: IResource, reactor: IReactorTime):
+ def __init__(
+ self,
+ resource: IResource,
+ reactor: IReactorTime,
+ experimental_cors_msc3886: bool = False,
+ ):
"""
Args:
@@ -270,6 +279,7 @@ class FakeSite:
"""
self._resource = resource
self.reactor = reactor
+ self.experimental_cors_msc3886 = experimental_cors_msc3886
def getResourceFor(self, request):
return self._resource
@@ -352,6 +362,12 @@ def make_request(
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(0, SEEK_END)
+ # Old version of Twisted (<20.3.0) have issues with parsing x-www-form-urlencoded
+ # bodies if the Content-Length header is missing
+ req.requestHeaders.addRawHeader(
+ b"Content-Length", str(len(content)).encode("ascii")
+ )
+
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode("ascii")
@@ -913,4 +929,14 @@ def setup_test_homeserver(
# Make the threadpool and database transactions synchronous for testing.
_make_test_homeserver_synchronous(hs)
+ # Load any configured modules into the homeserver
+ module_api = hs.get_module_api()
+ for module, config in hs.config.modules.loaded_modules:
+ module(config=config, api=module_api)
+
+ load_legacy_spam_checkers(hs)
+ load_legacy_third_party_event_rules(hs)
+ load_legacy_presence_router(hs)
+ load_legacy_password_auth_providers(hs)
+
return hs
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index e07ae78fc4..7cbc40736c 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -11,16 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+from typing import Tuple
from unittest.mock import Mock
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import EventTypes, LimitBlockingTypes, ServerNoticeMsgType
from synapse.api.errors import ResourceLimitError
from synapse.rest import admin
from synapse.rest.client import login, room, sync
+from synapse.server import HomeServer
from synapse.server_notices.resource_limits_server_notices import (
ResourceLimitsServerNotices,
)
+from synapse.util import Clock
from tests import unittest
from tests.test_utils import make_awaitable
@@ -52,7 +56,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
return config
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.server_notices_sender = self.hs.get_server_notices_sender()
# relying on [1] is far from ideal, but the only case where
@@ -251,7 +255,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
c["admin_contact"] = "mailto:user@test.com"
return c
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = self.hs.get_datastores().main
self.server_notices_sender = self.hs.get_server_notices_sender()
self.server_notices_manager = self.hs.get_server_notices_manager()
@@ -347,14 +351,15 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
self.assertTrue(notice_in_room, "No server notice in room")
- def _trigger_notice_and_join(self):
+ def _trigger_notice_and_join(self) -> Tuple[str, str, str]:
"""Creates enough active users to hit the MAU limit and trigger a system notice
about it, then joins the system notices room with one of the users created.
Returns:
- user_id (str): The ID of the user that joined the room.
- tok (str): The access token of the user that joined the room.
- room_id (str): The ID of the room that's been joined.
+ A tuple of:
+ user_id: The ID of the user that joined the room.
+ tok: The access token of the user that joined the room.
+ room_id: The ID of the room that's been joined.
"""
user_id = None
tok = None
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index 46d829b062..5773172ab8 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -35,66 +35,45 @@ from synapse.util import Clock
from synapse.util.async_helpers import yieldable_gather_results
from tests import unittest
+from tests.test_utils.event_injection import create_event, inject_event
class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
def prepare(self, reactor, clock, hs):
+ self.hs = hs
self.store: EventsWorkerStore = hs.get_datastores().main
- # insert some test data
- for rid in ("room1", "room2"):
- self.get_success(
- self.store.db_pool.simple_insert(
- "rooms",
- {"room_id": rid, "room_version": 4},
- )
- )
+ self.user = self.register_user("user", "pass")
+ self.token = self.login(self.user, "pass")
+ self.room_id = self.helper.create_room_as(self.user, tok=self.token)
self.event_ids: List[str] = []
- for idx, rid in enumerate(
- (
- "room1",
- "room1",
- "room1",
- "room2",
- )
- ):
- event_json = {"type": f"test {idx}", "room_id": rid}
- event = make_event_from_dict(event_json, room_version=RoomVersions.V4)
- event_id = event.event_id
-
- self.get_success(
- self.store.db_pool.simple_insert(
- "events",
- {
- "event_id": event_id,
- "room_id": rid,
- "topological_ordering": idx,
- "stream_ordering": idx,
- "type": event.type,
- "processed": True,
- "outlier": False,
- },
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "event_json",
- {
- "event_id": event_id,
- "room_id": rid,
- "json": json.dumps(event_json),
- "internal_metadata": "{}",
- "format_version": 3,
- },
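+ # Inject a few events into the room through the usual event-creation path.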
+ for i in range(3):
+ event = self.get_success(
+ inject_event(
+ hs,
+ room_version=RoomVersions.V7.identifier,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": f"foobarbaz{i}"},
)
)
- self.event_ids.append(event_id)
+
+ self.event_ids.append(event.event_id)
def test_simple(self):
with LoggingContext(name="test") as ctx:
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
+ self.store.have_seen_events(
+ self.room_id, [self.event_ids[0], "eventdoesnotexist"]
+ )
)
self.assertEqual(res, {self.event_ids[0]})
@@ -104,22 +83,87 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
# a second lookup of the same events should cause no queries
with LoggingContext(name="test") as ctx:
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
+ self.store.have_seen_events(
+ self.room_id, [self.event_ids[0], "eventdoesnotexist"]
+ )
)
self.assertEqual(res, {self.event_ids[0]})
self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
- def test_query_via_event_cache(self):
- # fetch an event into the event cache
- self.get_success(self.store.get_event(self.event_ids[0]))
+ def test_persisting_event_invalidates_cache(self):
+ """
+ Test to make sure that the `have_seen_event` cache is invalidated after we
+ persist an event, so that it returns the updated value.
+ """
+ event, event_context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": "garply"},
+ )
+ )
- # looking it up should now cause no db hits
with LoggingContext(name="test") as ctx:
+ # First, check `have_seen_event` for an event we have not seen yet
+ # to prime the cache with a `false` value.
res = self.get_success(
- self.store.have_seen_events("room1", [self.event_ids[0]])
+ self.store.have_seen_events(event.room_id, [event.event_id])
)
- self.assertEqual(res, {self.event_ids[0]})
- self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
+ self.assertEqual(res, set())
+
+ # That should result in a single db query to lookup
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ # Persist the event which should invalidate or prefill the
+ # `have_seen_event` cache so we don't return stale values.
+ persistence = self.hs.get_storage_controllers().persistence
+ self.get_success(
+ persistence.persist_event(
+ event,
+ event_context,
+ )
+ )
+
+ with LoggingContext(name="test") as ctx:
+ # Check `have_seen_event` again and we should see the updated fact
+ # that we have now seen the event after persisting it.
+ res = self.get_success(
+ self.store.have_seen_events(event.room_id, [event.event_id])
+ )
+ self.assertEqual(res, {event.event_id})
+
+ # That should result in a single db query to lookup
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ def test_invalidate_cache_by_room_id(self):
+ """
+ Test to make sure that all events associated with the given `(room_id,)`
+ are invalidated in the `have_seen_event` cache.
+ """
+ with LoggingContext(name="test") as ctx:
+ # Prime the cache with some values
+ res = self.get_success(
+ self.store.have_seen_events(self.room_id, self.event_ids)
+ )
+ self.assertEqual(res, set(self.event_ids))
+
+ # That should result in a single db query to lookup
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+
+ # Clear the cache with any events associated with the `room_id`
+ self.store.have_seen_event.invalidate((self.room_id,))
+
+ with LoggingContext(name="test") as ctx:
+ res = self.get_success(
+ self.store.have_seen_events(self.room_id, self.event_ids)
+ )
+ self.assertEqual(res, set(self.event_ids))
+
+ # Since we cleared the cache, it should result in another db query to lookup
+ self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
class EventCacheTestCase(unittest.HomeserverTestCase):
@@ -254,7 +298,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase):
"room_id": self.room_id,
"json": json.dumps(event_json),
"internal_metadata": "{}",
- "format_version": EventFormatVersions.V3,
+ "format_version": EventFormatVersions.ROOM_V4_PLUS,
},
)
)
diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py
new file mode 100644
index 0000000000..c4f12d81d7
--- /dev/null
+++ b/tests/storage/databases/main/test_receipts.py
@@ -0,0 +1,209 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional, Sequence, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+
+class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ self.store = hs.get_datastores().main
+ self.user_id = self.register_user("foo", "pass")
+ self.token = self.login("foo", "pass")
+ self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+ self.other_room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ def _test_background_receipts_unique_index(
+ self,
+ update_name: str,
+ index_name: str,
+ table: str,
+ receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]],
+ expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]],
+ ):
+ """Test that the background update to uniqueify non-thread receipts in
+ the given receipts table works properly.
+
+ Args:
+ update_name: The name of the background update to test.
+ index_name: The name of the index that the background update creates.
+ table: The table of receipts that the background update fixes.
+ receipts: The test data containing duplicate receipts: the receipt rows
+ to insert, grouped by `(room_id, receipt_type, user_id)`.
+ expected_unique_receipts: A dictionary of `(room_id, receipt_type, user_id)`
+ keys and expected receipt key-values after duplicate receipts have been
+ removed.
+ """
+ # First, undo the background update.
+ def drop_receipts_unique_index(txn: LoggingTransaction) -> None:
+ txn.execute(f"DROP INDEX IF EXISTS {index_name}")
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "drop_receipts_unique_index",
+ drop_receipts_unique_index,
+ )
+ )
+
+ # Populate the receipts table, including duplicates.
+ for (room_id, receipt_type, user_id), rows in receipts.items():
+ for row in rows:
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ table,
+ {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ "thread_id": None,
+ "data": "{}",
+ **row,
+ },
+ )
+ )
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": update_name,
+ "progress_json": "{}",
+ },
+ )
+ )
+
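+ # Reset the updater's "all done" flag so that the background update we just
+ # inserted is actually picked up and run.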
+ self.store.db_pool.updates._all_done = False
+
+ self.wait_for_background_updates()
+
+ # Check that the remaining receipts match expectations.
+ for (
+ room_id,
+ receipt_type,
+ user_id,
+ ), expected_row in expected_unique_receipts.items():
+ # Include the receipt key in the returned columns, for more informative
+ # assertion messages.
+ columns = ["room_id", "receipt_type", "user_id"]
+ if expected_row is not None:
+ columns += expected_row.keys()
+
+ rows = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table=table,
+ keyvalues={
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ # `simple_select_onecol` does not support NULL filters,
+ # so skip the filter on `thread_id`.
+ },
+ retcols=columns,
+ desc="get_receipt",
+ )
+ )
+
+ if expected_row is not None:
+ self.assertEqual(
+ len(rows),
+ 1,
+ f"Background update did not leave behind latest receipt in {table}",
+ )
+ self.assertEqual(
+ rows[0],
+ {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ **expected_row,
+ },
+ )
+ else:
+ self.assertEqual(
+ len(rows),
+ 0,
+ f"Background update did not remove all duplicate receipts from {table}",
+ )
+
+ def test_background_receipts_linearized_unique_index(self):
+ """Test that the background update to uniqueify non-thread receipts in
+ `receipts_linearized` works properly.
+ """
+ self._test_background_receipts_unique_index(
+ "receipts_linearized_unique_index",
+ "receipts_linearized_unique_index",
+ "receipts_linearized",
+ receipts={
+ (self.room_id, "m.read", self.user_id): [
+ {"stream_id": 5, "event_id": "$some_event"},
+ {"stream_id": 6, "event_id": "$some_event"},
+ ],
+ (self.other_room_id, "m.read", self.user_id): [
+ {"stream_id": 7, "event_id": "$some_event"}
+ ],
+ },
+ expected_unique_receipts={
+ (self.room_id, "m.read", self.user_id): {"stream_id": 6},
+ (self.other_room_id, "m.read", self.user_id): {"stream_id": 7},
+ },
+ )
+
+ def test_background_receipts_graph_unique_index(self):
+ """Test that the background update to uniqueify non-thread receipts in
+ `receipts_graph` works properly.
+ """
+ self._test_background_receipts_unique_index(
+ "receipts_graph_unique_index",
+ "receipts_graph_unique_index",
+ "receipts_graph",
+ receipts={
+ (self.room_id, "m.read", self.user_id): [
+ {
+ "event_ids": '["$some_event"]',
+ },
+ {
+ "event_ids": '["$some_event"]',
+ },
+ ],
+ (self.other_room_id, "m.read", self.user_id): [
+ {
+ "event_ids": '["$some_event"]',
+ }
+ ],
+ },
+ expected_unique_receipts={
+ (self.room_id, "m.read", self.user_id): None,
+ (self.other_room_id, "m.read", self.user_id): {
+ "event_ids": '["$some_event"]'
+ },
+ },
+ )
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index cce8e75c74..40e58f8199 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -54,7 +54,6 @@ class SQLBaseStoreTestCase(unittest.TestCase):
sqlite_config = {"name": "sqlite3"}
engine = create_engine(sqlite_config)
fake_engine = Mock(wraps=engine)
- fake_engine.can_native_upsert = False
fake_engine.in_transaction.return_value = False
db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine)
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index f37505b6cf..8e7db2c4ec 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -28,7 +28,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
"""
for device_id in device_ids:
- stream_id = self.get_success(
+ self.get_success(
self.store.add_device_change_to_streams(
user_id, [device_id], ["!some:room"]
)
@@ -39,7 +39,6 @@ class DeviceStoreTestCase(HomeserverTestCase):
user_id=user_id,
device_id=device_id,
room_id="!some:room",
- stream_id=stream_id,
hosts=[host],
context={},
)
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index a0ce077a99..de9f4af2de 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -531,7 +531,9 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
self.get_success(
- event_handler.handle_new_client_event(self.requester, event, context)
+ event_handler.handle_new_client_event(
+ self.requester, events_and_context=[(event, context)]
+ )
)
state1 = set(self.get_success(context.get_current_state_ids()).values())
@@ -549,7 +551,9 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
self.get_success(
- event_handler.handle_new_client_event(self.requester, event, context)
+ event_handler.handle_new_client_event(
+ self.requester, events_and_context=[(event, context)]
+ )
)
state2 = set(self.get_success(context.get_current_state_ids()).values())
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index d92a9ac5b7..853db930d6 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -12,25 +12,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Tuple, Union
+import datetime
+from typing import Dict, List, Tuple, Union
import attr
from parameterized import parameterized
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
)
from synapse.events import _EventInternalMetadata
-from synapse.util import json_encoder
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.types import JsonDict
+from synapse.util import Clock, json_encoder
import tests.unittest
import tests.utils
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class _BackfillSetupInfo:
+ room_id: str
+ depth_map: Dict[str, int]
+
+
class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_get_prev_events_for_room(self):
@@ -513,7 +534,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]:
"""Account for differences in prev_events format across room versions"""
- if room_version.event_format == EventFormatVersions.V1:
+ if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
return prev_event_id, {}
return prev_event_id
@@ -571,11 +592,600 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
self.assertEqual(count, 1)
- _, event_id = self.get_success(
+ next_staged_event_info = self.get_success(
self.store.get_next_staged_event_id_for_room(room_id)
)
+ assert next_staged_event_info
+ _, event_id = next_staged_event_info
self.assertEqual(event_id, "$fake_event_id_500")
+ def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various events and backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ # The silly graph we use to test grabbing backward extremities,
+ # where the top is the oldest event.
+ # 1 (oldest)
+ # |
+ # 2 ⹁
+ # | \
+ # | [b1, b2, b3]
+ # | |
+ # | A
+ # | /
+ # 3 {
+ # | \
+ # | [b4, b5, b6]
+ # | |
+ # | B
+ # | /
+ # 4 ´
+ # |
+ # 5 (newest)
+
+ event_graph: Dict[str, List[str]] = {
+ "1": [],
+ "2": ["1"],
+ "3": ["2", "A"],
+ "4": ["3", "B"],
+ "5": ["4"],
+ "A": ["b1", "b2", "b3"],
+ "b1": ["2"],
+ "b2": ["2"],
+ "b3": ["2"],
+ "B": ["b4", "b5", "b6"],
+ "b4": ["3"],
+ "b5": ["3"],
+ "b6": ["3"],
+ }
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "b1": 3,
+ "b2": 3,
+ "b3": 3,
+ "A": 4,
+ "3": 5,
+ "b4": 6,
+ "b5": 6,
+ "b6": 6,
+ "B": 7,
+ "4": 8,
+ "5": 9,
+ }
+
+ # The events we have persisted on our server.
+ # The rest are events in the room but not backfilled yet.
+ our_server_events = {"5", "4", "B", "3", "A"}
+
+ complete_event_dict_map: Dict[str, JsonDict] = {}
+ stream_ordering = 0
+ for (event_id, prev_event_ids) in event_graph.items():
+ depth = depth_map[event_id]
+
+ complete_event_dict_map[event_id] = {
+ "event_id": event_id,
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": "@sender",
+ "prev_event_ids": prev_event_ids,
+ "auth_event_ids": [],
+ "origin_server_ts": stream_ordering,
+ "depth": depth,
+ "stream_ordering": stream_ordering,
+ "content": {"body": "event" + event_id},
+ }
+
+ stream_ordering += 1
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ for event_id in our_server_events:
+ event_dict = complete_event_dict_map[event_id]
+
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_dict.get("event_id"),
+ "type": event_dict.get("type"),
+ "room_id": event_dict.get("room_id"),
+ "depth": event_dict.get("depth"),
+ "topological_ordering": event_dict.get("depth"),
+ "stream_ordering": event_dict.get("stream_ordering"),
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ # Insert the event edges
+ for event_id in our_server_events:
+ for prev_event_id in event_graph[event_id]:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_edges",
+ values={
+ "event_id": event_id,
+ "prev_event_id": prev_event_id,
+ "room_id": room_id,
+ },
+ )
+
+ # Insert the backward extremities
+ prev_events_of_our_events = {
+ prev_event_id
+ for our_server_event in our_server_events
+ for prev_event_id in complete_event_dict_map[our_server_event][
+ "prev_event_ids"
+ ]
+ }
+ backward_extremities = prev_events_of_our_events - our_server_events
+ for backward_extremity in backward_extremities:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_backward_extremities",
+ values={
+ "event_id": backward_extremity,
+ "room_id": room_id,
+ },
+ )
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
+ def test_get_backfill_points_in_room(self):
+ """
+ Test to make sure only backfill points that are older and come before
+ the `current_depth` are returned.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Try at "B"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"])
+
+ # Try at "A"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Event "2" has a depth of 2 but is not included here because we only
+ # know the approximate depth of 5 from our event "3".
+ self.assertListEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def test_get_backfill_points_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that events we have attempted to backfill (and within
+ backoff timeout duration) do not show up as an event to backfill again.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b5", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b4", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b2", "fake cause")
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ # Try at "B"
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["B"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertEqual(backfill_event_ids, ["b6", "2", "b1"])
+
+ def test_get_backfill_points_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that, after we record a failed attempt to backfill event
+ "b3", we can retry and see "b3" again once the backoff timeout duration has
+ elapsed.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+
+ # Now advance time by 2 hours and we should only be able to see "b3"
+ # because we have waited long enough for the single attempt (2^1 hours)
+ # but we still shouldn't see "b1" because we haven't waited long enough
+ # for this many attempts. We didn't do anything to "b2" so it should be
+ # visible regardless.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Try at "A" and make sure that "b1" is not in the list because we've
+ # already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2"])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try at "A" again after we advanced enough time and we should see "b3" again
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def test_get_backfill_points_in_room_works_after_many_failed_pull_attempts_that_could_naively_overflow(
+ self,
+ ) -> None:
+ """
+ A test that reproduces #13929 (Postgres only).
+
+ Test to make sure we can still get backfill points after many failed pull
+ Test to make sure we can still get backfill points after many failed pull
+ attempts that cause us to back off to the limit, even if the backoff formula
+ would tell us to wait for more seconds than can be expressed in a 32 bit
+ signed int.
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Pretend that we have tried and failed 10 times to backfill event b1.
+ for _ in range(10):
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+
+ # If the backoff periods grow without limit:
+ # After the first failed attempt, we would have backed off for 1 << 1 = 2 hours.
+ # After the second failed attempt we would have backed off for 1 << 2 = 4 hours,
+ # so after the 10th failed attempt we should back off for 1 << 10 == 1024 hours.
+ # Wait 1100 hours just so we have a nice round number.
+ self.reactor.advance(datetime.timedelta(hours=1100).total_seconds())
+
+ # 1024 hours in milliseconds is 1024 * 3600000, which exceeds the largest 32 bit
+ # signed integer. The bug we're reproducing is that this overflow causes an
+ # error in postgres preventing us from fetching a set of backwards extremities
+ # to retry fetching.
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id, depth_map["A"], limit=100)
+ )
+
+ # We should aim to fetch all backoff points: b1's latest backoff period has
+ # expired, and we haven't tried the rest.
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
+
+ def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various insertion event backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "insertion_eventA": 3,
+ "3": 4,
+ "insertion_eventB": 5,
+ "4": 6,
+ "5": 7,
+ }
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ stream_ordering = 0
+ for event_id, depth in depth_map.items():
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_id,
+ "type": EventTypes.MSC2716_INSERTION
+ if event_id.startswith("insertion_event")
+ else "test_regular_type",
+ "room_id": room_id,
+ "depth": depth,
+ "topological_ordering": depth,
+ "stream_ordering": stream_ordering,
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ if event_id.startswith("insertion_event"):
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="insertion_event_extremities",
+ values={
+ "event_id": event_id,
+ "room_id": room_id,
+ },
+ )
+
+ stream_ordering += 1
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_insertion_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
+ def test_get_insertion_event_backward_extremities_in_room(self):
+ """
+ Test to make sure only insertion event backward extremities that are
+ older and come before the `current_depth` are returned.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Try at "insertion_eventB"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventB"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
+
+ # Try at "insertion_eventA"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Event "2" has a depth of 2 but is not included here because we only
+ # know the approximate depth of 5 from our event "3".
+ self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
+
+ def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that insertion events we have attempted to backfill
+ (and within backoff timeout duration) do not show up as an event to
+ backfill again.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_insertion_event_backward_extremities_in_room` exclude them
+ # because we haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ # Try at "insertion_eventB"
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventB"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertEqual(backfill_event_ids, ["insertion_eventB"])
+
+ def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that, after we fake several attempts to backfill event
+ "insertion_eventA", we can retry and see "insertion_eventA" again once the
+ backoff timeout duration has elapsed.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+ depth_map = setup_info.depth_map
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventB", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # Now advance time by 2 hours and we should only be able to see
+ # "insertion_eventB" because we have waited long enough for the single
+ # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
+ # because we haven't waited long enough for this many attempts.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
+ # in the list because we've already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, [])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try at "insertion_eventA" again after we advanced enough time and we
+ # should see "insertion_eventA" again
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(
+ room_id, depth_map["insertion_eventA"], limit=100
+ )
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertEqual(backfill_event_ids, ["insertion_eventA"])
+
+ def test_get_event_ids_to_not_pull_from_backoff(
+ self,
+ ):
+ """
+ Test to make sure only event IDs we should backoff from are returned.
+ """
+ # Create the room
+ user_id = self.register_user("alice", "test")
+ tok = self.login("alice", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "$failed_event_id", "fake cause"
+ )
+ )
+
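+ # No time has passed since the failed pull attempt, so the event should still
+ # be within its backoff period and therefore returned below.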
+ event_ids_to_backoff = self.get_success(
+ self.store.get_event_ids_to_not_pull_from_backoff(
+ room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
+ )
+ )
+
+ self.assertEqual(event_ids_to_backoff, ["$failed_event_id"])
+
+ def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure no event IDs are returned after the backoff duration has
+ elapsed.
+ """
+ # Create the room
+ user_id = self.register_user("alice", "test")
+ tok = self.login("alice", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "$failed_event_id", "fake cause"
+ )
+ )
+
+ # Now advance time by 2 hours so we wait long enough for the single failed
+ # attempt (2^1 hours).
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ event_ids_to_backoff = self.get_success(
+ self.store.get_event_ids_to_not_pull_from_backoff(
+ room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
+ )
+ )
+ # Since this function only returns events we should back off from, and enough
+ # time has elapsed past the backoff period, there are no events to return.
+ self.assertEqual(event_ids_to_backoff, [])
+
@attr.s
class FakeEvent:
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index 088fbb247b..6f1135eef4 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from prometheus_client import generate_latest
-from synapse.metrics import REGISTRY, generate_latest
+from synapse.metrics import REGISTRY
from synapse.types import UserID, create_requester
from tests.unittest import HomeserverTestCase
@@ -53,8 +54,8 @@ class ExtremStatisticsTestCase(HomeserverTestCase):
items = list(
filter(
- lambda x: b"synapse_forward_extremities_" in x,
- generate_latest(REGISTRY, emit_help=False).split(b"\n"),
+ lambda x: b"synapse_forward_extremities_" in x and b"# HELP" not in x,
+ generate_latest(REGISTRY).split(b"\n"),
)
)
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index ba40124c8a..ee48920f84 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -12,18 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Optional, Tuple
+
from twisted.test.proto_helpers import MemoryReactor
+from synapse.api.constants import MAIN_TIMELINE, RelationTypes
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.types import JsonDict
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
-USER_ID = "@user:example.com"
-
class EventPushActionsStoreTestCase(HomeserverTestCase):
servlets = [
@@ -38,21 +40,13 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
assert persist_events_store is not None
self.persist_events_store = persist_events_store
- def test_get_unread_push_actions_for_user_in_range_for_http(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_http(
- USER_ID, 0, 1000, 20
- )
- )
+ def _create_users_and_room(self) -> Tuple[str, str, str, str, str]:
+ """
+ Creates two users and a shared room.
- def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- USER_ID, 0, 1000, 20
- )
- )
-
- def test_count_aggregation(self) -> None:
+ Returns:
+ Tuple of (user 1 ID, user 1 token, user 2 ID, user 2 token, room ID).
+ """
# Create a user to receive notifications and send receipts.
user_id = self.register_user("user1235", "pass")
token = self.login("user1235", "pass")
@@ -65,11 +59,104 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
room_id = self.helper.create_room_as(user_id, tok=token)
self.helper.join(room_id, other_id, tok=other_token)
+ return user_id, token, other_id, other_token, room_id
+
+ def test_get_unread_push_actions_for_user_in_range(self) -> None:
+ """Test getting unread push actions for HTTP and email pushers."""
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
+ # Create two events, one of which is a highlight.
+ first_event_id = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={"msgtype": "m.text", "body": "msg"},
+ tok=other_token,
+ )["event_id"]
+ second_event_id = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={
+ "msgtype": "m.text",
+ "body": user_id,
+ "m.relates_to": {
+ "rel_type": RelationTypes.THREAD,
+ "event_id": first_event_id,
+ },
+ },
+ tok=other_token,
+ )["event_id"]
+
+ # Fetch unread actions for HTTP pushers.
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(http_actions))
+
+ # Fetch unread actions for email pushers.
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(email_actions))
+
+ # Send a receipt, which should clear the first action.
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[first_event_id],
+ thread_id=None,
+ data={},
+ )
+ )
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(1, len(http_actions))
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(1, len(email_actions))
+
+ # Send a thread receipt to clear the thread action.
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[second_event_id],
+ thread_id=first_event_id,
+ data={},
+ )
+ )
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], http_actions)
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], email_actions)
+
+ def test_count_aggregation(self) -> None:
+ # Create a user to receive notifications and send receipts.
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
last_event_id: str
- def _assert_counts(
- noitf_count: int, unread_count: int, highlight_count: int
- ) -> None:
+ def _assert_counts(noitf_count: int, highlight_count: int) -> None:
counts = self.get_success(
self.store.db_pool.runInteraction(
"get-unread-counts",
@@ -79,13 +166,14 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
)
self.assertEqual(
- counts,
+ counts.main_timeline,
NotifCounts(
notify_count=noitf_count,
- unread_count=unread_count,
+ unread_count=0,
highlight_count=highlight_count,
),
)
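+ # This test never sends events into a thread, so no per-thread counts are expected.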
+ self.assertEqual(counts.threads, {})
def _create_event(highlight: bool = False) -> str:
result = self.helper.send_event(
@@ -108,63 +196,518 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
"m.read",
user_id=user_id,
event_ids=[event_id],
+ thread_id=None,
data={},
)
)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_rotate()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
event_id = _create_event()
- _assert_counts(2, 2, 0)
+ _assert_counts(2, 0)
_rotate()
- _assert_counts(2, 2, 0)
+ _assert_counts(2, 0)
_create_event()
_mark_read(event_id)
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event()
+ _assert_counts(1, 0)
_rotate()
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
# Delete old event push actions, this should not affect the (summarised) count.
+ #
+ # All event push actions are kept for 24 hours, so we need to move forward
+ # in time.
+ self.pump(60 * 60 * 24)
self.get_success(self.store._remove_old_push_actions_that_have_rotated())
- _assert_counts(1, 1, 0)
+ # Double check that the event push actions have been cleared (i.e. that
+ # any results *must* come from the summary).
+ result = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="event_push_actions",
+ keyvalues={"1": 1},
+ retcols=("event_id",),
+ desc="",
+ )
+ )
+ self.assertEqual(result, [])
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
event_id = _create_event(True)
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
_rotate()
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
# Check that adding another notification and rotating after highlight
# works.
_create_event()
_rotate()
- _assert_counts(2, 2, 1)
+ _assert_counts(2, 1)
# Check that sending read receipts at different points results in the
# right counts.
_mark_read(event_id)
- _assert_counts(1, 1, 0)
+ _assert_counts(1, 0)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_create_event(True)
- _assert_counts(1, 1, 1)
+ _assert_counts(1, 1)
_mark_read(last_event_id)
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
_rotate()
- _assert_counts(0, 0, 0)
+ _assert_counts(0, 0)
+
+ def test_count_aggregation_threads(self) -> None:
+ """
+ This is essentially the same test as test_count_aggregation, but adds
+ events to the main timeline and to a thread.
+ """
+
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+ thread_id: str
+
+ last_event_id: str
+
+ def _assert_counts(
+ noitf_count: int,
+ highlight_count: int,
+ thread_notif_count: int,
+ thread_highlight_count: int,
+ ) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=noitf_count,
+ unread_count=0,
+ highlight_count=highlight_count,
+ ),
+ )
+ if thread_notif_count or thread_highlight_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=thread_highlight_count,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ def _create_event(
+ highlight: bool = False, thread_id: Optional[str] = None
+ ) -> str:
+ content: JsonDict = {
+ "msgtype": "m.text",
+ "body": user_id if highlight else "msg",
+ }
+ if thread_id:
+ content["m.relates_to"] = {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ }
+
+ result = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content=content,
+ tok=other_token,
+ )
+ nonlocal last_event_id
+ last_event_id = result["event_id"]
+ return last_event_id
+
+ def _rotate() -> None:
+ self.get_success(self.store._rotate_notifs())
+
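+ # A thread ID of `MAIN_TIMELINE` acknowledges the room's main timeline;
+ # passing a thread root's event ID instead acknowledges that thread.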
+ def _mark_read(event_id: str, thread_id: str = MAIN_TIMELINE) -> None:
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[event_id],
+ thread_id=thread_id,
+ data={},
+ )
+ )
+
+ _assert_counts(0, 0, 0, 0)
+ thread_id = _create_event()
+ _assert_counts(1, 0, 0, 0)
+ _rotate()
+ _assert_counts(1, 0, 0, 0)
+
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ _create_event()
+ _assert_counts(2, 0, 1, 0)
+ _rotate()
+ _assert_counts(2, 0, 1, 0)
+
+ event_id = _create_event(thread_id=thread_id)
+ _assert_counts(2, 0, 2, 0)
+ _rotate()
+ _assert_counts(2, 0, 2, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
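+ # A receipt for the main timeline should leave the thread's counts untouched.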
+ _mark_read(event_id)
+ _assert_counts(1, 0, 3, 0)
+ _mark_read(event_id, thread_id)
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ # Delete old event push actions; this should not affect the (summarised) count.
+ self.get_success(self.store._remove_old_push_actions_that_have_rotated())
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _assert_counts(1, 1, 0, 0)
+ _rotate()
+ _assert_counts(1, 1, 0, 0)
+
+ event_id = _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _rotate()
+ _assert_counts(1, 1, 1, 1)
+
+ # Check that adding another notification and rotating after highlight
+ # works.
+ _create_event()
+ _rotate()
+ _assert_counts(2, 1, 1, 1)
+
+ _create_event(thread_id=thread_id)
+ _rotate()
+ _assert_counts(2, 1, 2, 1)
+
+ # Check that sending read receipts at different points results in the
+ # right counts.
+ _mark_read(event_id)
+ _assert_counts(1, 0, 2, 1)
+ _mark_read(event_id, thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 1, 0)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _mark_read(last_event_id)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+ _rotate()
+ _assert_counts(0, 0, 0, 0)
+
+ def test_count_aggregation_mixed(self) -> None:
+ """
+ This is essentially the same test as test_count_aggregation_threads, but
+ sends both unthreaded and threaded receipts.
+ """
+
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+ thread_id: str
+
+ last_event_id: str
+
+ def _assert_counts(
+ notif_count: int,
+ highlight_count: int,
+ thread_notif_count: int,
+ thread_highlight_count: int,
+ ) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=notif_count,
+ unread_count=0,
+ highlight_count=highlight_count,
+ ),
+ )
+ if thread_notif_count or thread_highlight_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=thread_highlight_count,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ def _create_event(
+ highlight: bool = False, thread_id: Optional[str] = None
+ ) -> str:
+ content: JsonDict = {
+ "msgtype": "m.text",
+ "body": user_id if highlight else "msg",
+ }
+ if thread_id:
+ content["m.relates_to"] = {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ }
+
+ result = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content=content,
+ tok=other_token,
+ )
+ nonlocal last_event_id
+ last_event_id = result["event_id"]
+ return last_event_id
+
+ def _rotate() -> None:
+ self.get_success(self.store._rotate_notifs())
+
+ def _mark_read(event_id: str, thread_id: Optional[str] = None) -> None:
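+ # thread_id=None sends an *unthreaded* receipt, which applies to the main timeline and all threads.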
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[event_id],
+ thread_id=thread_id,
+ data={},
+ )
+ )
+
+ _assert_counts(0, 0, 0, 0)
+ thread_id = _create_event()
+ _assert_counts(1, 0, 0, 0)
+ _rotate()
+ _assert_counts(1, 0, 0, 0)
+
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ _create_event()
+ _assert_counts(2, 0, 1, 0)
+ _rotate()
+ _assert_counts(2, 0, 1, 0)
+
+ event_id = _create_event(thread_id=thread_id)
+ _assert_counts(2, 0, 2, 0)
+ _rotate()
+ _assert_counts(2, 0, 2, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
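+ # An unthreaded receipt clears notifications up to the event in both the main timeline and the thread.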
+ _mark_read(event_id)
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id, MAIN_TIMELINE)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event()
+ _create_event(thread_id=thread_id)
+ _assert_counts(1, 0, 1, 0)
+ _rotate()
+ _assert_counts(1, 0, 1, 0)
+
+ # Delete old event push actions; this should not affect the (summarised) count.
+ self.get_success(self.store._remove_old_push_actions_that_have_rotated())
+ _assert_counts(1, 0, 1, 0)
+
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _assert_counts(1, 1, 0, 0)
+ _rotate()
+ _assert_counts(1, 1, 0, 0)
+
+ event_id = _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _rotate()
+ _assert_counts(1, 1, 1, 1)
+
+ # Check that adding another notification and rotating after highlight
+ # works.
+ _create_event()
+ _rotate()
+ _assert_counts(2, 1, 1, 1)
+
+ _create_event(thread_id=thread_id)
+ _rotate()
+ _assert_counts(2, 1, 2, 1)
+
+ # Check that sending read receipts at different points results in the
+ # right counts.
+ _mark_read(event_id)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(event_id, MAIN_TIMELINE)
+ _assert_counts(1, 0, 1, 0)
+ _mark_read(last_event_id, MAIN_TIMELINE)
+ _assert_counts(0, 0, 1, 0)
+ _mark_read(last_event_id, thread_id)
+ _assert_counts(0, 0, 0, 0)
+
+ _create_event(True)
+ _create_event(True, thread_id)
+ _assert_counts(1, 1, 1, 1)
+ _mark_read(last_event_id)
+ _assert_counts(0, 0, 0, 0)
+ _rotate()
+ _assert_counts(0, 0, 0, 0)
+
+ def test_recursive_thread(self) -> None:
+ """
+ Events related to events in a thread should still be considered part of
+ that thread.
+ """
+
+ # Create a user to receive notifications and send receipts.
+ user_id = self.register_user("user1235", "pass")
+ token = self.login("user1235", "pass")
+
+ # And another user to send events.
+ other_id = self.register_user("other", "pass")
+ other_token = self.login("other", "pass")
+
+ # Create a room and put both users in it.
+ room_id = self.helper.create_room_as(user_id, tok=token)
+ self.helper.join(room_id, other_id, tok=other_token)
+
+ # Update the user's push rules to care about reaction events.
+ self.get_success(
+ self.store.add_push_rule(
+ user_id,
+ "related_events",
+ priority_class=5,
+ conditions=[
+ {"kind": "event_match", "key": "type", "pattern": "m.reaction"}
+ ],
+ actions=["notify"],
+ )
+ )
+
+ def _create_event(type: str, content: JsonDict) -> str:
+ result = self.helper.send_event(
+ room_id, type=type, content=content, tok=other_token
+ )
+ return result["event_id"]
+
+ def _assert_counts(notif_count: int, thread_notif_count: int) -> None:
+ counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-unread-counts",
+ self.store._get_unread_counts_by_receipt_txn,
+ room_id,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ counts.main_timeline,
+ NotifCounts(
+ notify_count=notif_count, unread_count=0, highlight_count=0
+ ),
+ )
+ if thread_notif_count:
+ self.assertEqual(
+ counts.threads,
+ {
+ thread_id: NotifCounts(
+ notify_count=thread_notif_count,
+ unread_count=0,
+ highlight_count=0,
+ ),
+ },
+ )
+ else:
+ self.assertEqual(counts.threads, {})
+
+ # Create a root event.
+ thread_id = _create_event(
+ "m.room.message", {"msgtype": "m.text", "body": "msg"}
+ )
+ _assert_counts(1, 0)
+
+ # Reply, creating a thread.
+ reply_id = _create_event(
+ "m.room.message",
+ {
+ "msgtype": "m.text",
+ "body": "msg",
+ "m.relates_to": {
+ "rel_type": "m.thread",
+ "event_id": thread_id,
+ },
+ },
+ )
+ _assert_counts(1, 1)
+
+ # Create an event related to a thread event; this should still appear in
+ # the thread.
+ _create_event(
+ type="m.reaction",
+ content={
+ "m.relates_to": {
+ "rel_type": "m.annotation",
+ "event_id": reply_id,
+ "key": "A",
+ }
+ },
+ )
+ _assert_counts(1, 2)
def test_find_first_stream_ordering_after_ts(self) -> None:
def add_event(so: int, ts: int) -> None:
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 2d8d1f860f..d6a2b8d274 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -16,15 +16,157 @@ from typing import List, Optional
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
-from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.storage.engines import IncorrectDatabaseSetup
-from synapse.storage.util.id_generators import MultiWriterIdGenerator
+from synapse.storage.types import Cursor
+from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
from tests.utils import USE_POSTGRES_FOR_TESTS
+class StreamIdGeneratorTestCase(HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.db_pool: DatabasePool = self.store.db_pool
+
+ self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
+
+ def _setup_db(self, txn: LoggingTransaction) -> None:
+ txn.execute(
+ """
+ CREATE TABLE foobar (
+ stream_id BIGINT NOT NULL,
+ data TEXT
+ );
+ """
+ )
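+ # Seed the table with an existing row so the generator starts with a current token of 123.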
+ txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")
+
+ def _create_id_generator(self) -> StreamIdGenerator:
+ def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
+ return StreamIdGenerator(
+ db_conn=conn,
+ table="foobar",
+ column="stream_id",
+ )
+
+ return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
+
+ def test_initial_value(self) -> None:
+ """Check that we read the current token from the DB."""
+ id_gen = self._create_id_generator()
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ def test_single_gen_next(self) -> None:
+ """Check that we correctly increment the current token from the DB."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ async with id_gen.get_next() as next_id:
+ # We haven't persisted `next_id` yet; current token is still 123
+ self.assertEqual(id_gen.get_current_token(), 123)
+ # But we did learn what the next value is
+ self.assertEqual(next_id, 124)
+
+ # Once the context manager closes we assume that the `next_id` has been
+ # written to the DB.
+ self.assertEqual(id_gen.get_current_token(), 124)
+
+ self.get_success(test_gen_next())
+
+ def test_multiple_gen_nexts(self) -> None:
+ """Check that we handle overlapping calls to gen_next sensibly."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request three new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ # None are persisted: current token unchanged.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Persist each in turn.
+ await ctx1.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 124)
+ await ctx2.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 125)
+ await ctx3.__aexit__(None, None, None)
+ self.assertEqual(id_gen.get_current_token(), 126)
+
+ self.get_success(test_gen_next())
+
+ def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
+ """Check that we handle overlapping calls to gen_next, even when their IDs
+ are created and persisted in different orders."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request three new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ # None are persisted: current token unchanged.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Persist them in a different order, starting with 126 from ctx3.
+ await ctx3.__aexit__(None, None, None)
+ # We haven't persisted 124 from ctx1 yet---current token is still 123.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Now persist 124 from ctx1.
+ await ctx1.__aexit__(None, None, None)
+ # Current token is then 124, waiting for 125 to be persisted.
+ self.assertEqual(id_gen.get_current_token(), 124)
+
+ # Finally persist 125 from ctx2.
+ await ctx2.__aexit__(None, None, None)
+ # Current token is then 126 (skipping over 125).
+ self.assertEqual(id_gen.get_current_token(), 126)
+
+ self.get_success(test_gen_next())
+
+ def test_gen_next_while_still_waiting_for_persistence(self) -> None:
+ """Check that we handle overlapping calls to gen_next."""
+ id_gen = self._create_id_generator()
+
+ async def test_gen_next() -> None:
+ ctx1 = id_gen.get_next()
+ ctx2 = id_gen.get_next()
+ ctx3 = id_gen.get_next()
+
+ # Request two new stream IDs.
+ self.assertEqual(await ctx1.__aenter__(), 124)
+ self.assertEqual(await ctx2.__aenter__(), 125)
+
+ # Persist ctx2 first.
+ await ctx2.__aexit__(None, None, None)
+ # Still waiting on ctx1's ID to be persisted.
+ self.assertEqual(id_gen.get_current_token(), 123)
+
+ # Now request a third stream ID. It should be 126 (the smallest ID that
+ # we've not yet handed out).
+ self.assertEqual(await ctx3.__aenter__(), 126)
+
+ self.get_success(test_gen_next())
+
+
class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
@@ -48,9 +190,9 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -446,7 +588,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
self._insert_row_with_id("master", 3)
# Now we add a row *without* updating the stream ID
- def _insert(txn):
+ def _insert(txn: Cursor) -> None:
txn.execute("INSERT INTO foobar VALUES (26, 'master')")
self.get_success(self.db_pool.runInteraction("_insert", _insert))
@@ -481,9 +623,9 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -617,9 +759,9 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
)
def _create_id_generator(
- self, instance_name="master", writers: Optional[List[str]] = None
+ self, instance_name: str = "master", writers: Optional[List[str]] = None
) -> MultiWriterIdGenerator:
- def _create(conn):
+ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
return MultiWriterIdGenerator(
conn,
self.db_pool,
@@ -641,7 +783,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
instance_name: str,
number: int,
update_stream_table: bool = True,
- ):
+ ) -> None:
"""Insert N rows as the given instance, inserting with stream IDs pulled
from the postgres sequence.
"""
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index e8b4a5644b..c55c4db970 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -96,8 +96,12 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
# Test each of the registered users is marked as active
timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1))
+ # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater.
+ # Check that timestamp really is an int.
+ assert timestamp is not None
self.assertGreater(timestamp, 0)
timestamp = self.get_success(self.store.user_last_seen_monthly_active(user2))
+ assert timestamp is not None
self.assertGreater(timestamp, 0)
# Test that users with reserved 3pids are not removed from the MAU table
@@ -166,10 +170,11 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.upsert_monthly_active_user(user_id2))
result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))
+ assert result is not None
self.assertGreater(result, 0)
result = self.get_success(self.store.user_last_seen_monthly_active(user_id3))
- self.assertNotEqual(result, 0)
+ self.assertIsNone(result)
@override_config({"max_mau_value": 5})
def test_reap_monthly_active_users(self):
diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py
index b1a8f8bba7..81253d0361 100644
--- a/tests/storage/test_receipts.py
+++ b/tests/storage/test_receipts.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Collection, Optional
+
from synapse.api.constants import ReceiptTypes
from synapse.types import UserID, create_requester
@@ -23,7 +25,7 @@ OUR_USER_ID = "@our:test"
class ReceiptTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver):
+ def prepare(self, reactor, clock, homeserver) -> None:
super().prepare(reactor, clock, homeserver)
self.store = homeserver.get_datastores().main
@@ -83,10 +85,41 @@ class ReceiptTestCase(HomeserverTestCase):
)
)
- def test_return_empty_with_no_data(self):
+ def get_last_unthreaded_receipt(
+ self, receipt_types: Collection[str], room_id: Optional[str] = None
+ ) -> Optional[str]:
+ """
+ Fetch the event ID for the latest unthreaded receipt in the test room for the test user.
+
+ Args:
+ receipt_types: The receipt types to fetch.
+ room_id: The room to fetch the receipt from. Defaults to room_id1.
+
+ Returns:
+ The latest receipt, if one exists.
+ """
+ result = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get_last_receipt_event_id_for_user",
+ self.store.get_last_unthreaded_receipt_for_user_txn,
+ OUR_USER_ID,
+ room_id or self.room_id1,
+ receipt_types,
+ )
+ )
+ if not result:
+ return None
+
+ event_id, _ = result
+ return event_id
+
+ def test_return_empty_with_no_data(self) -> None:
res = self.get_success(
self.store.get_receipts_for_user(
- OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
+ OUR_USER_ID,
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ],
)
)
self.assertEqual(res, {})
@@ -94,21 +127,21 @@ class ReceiptTestCase(HomeserverTestCase):
res = self.get_success(
self.store.get_receipts_for_user_with_orderings(
OUR_USER_ID,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
+ [
+ ReceiptTypes.READ,
+ ReceiptTypes.READ_PRIVATE,
+ ],
)
)
self.assertEqual(res, {})
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
- self.room_id1,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
- )
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
)
+
self.assertEqual(res, None)
- def test_get_receipts_for_user(self):
+ def test_get_receipts_for_user(self) -> None:
# Send some events into the first room
event1_1_id = self.create_and_send_event(
self.room_id1, UserID.from_string(OTHER_USER_ID)
@@ -120,13 +153,18 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
@@ -153,7 +191,7 @@ class ReceiptTestCase(HomeserverTestCase):
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
res = self.get_success(
@@ -169,7 +207,12 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
+ self.room_id2,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
res = self.get_success(
@@ -179,7 +222,7 @@ class ReceiptTestCase(HomeserverTestCase):
)
self.assertEqual(res, {self.room_id1: event1_2_id, self.room_id2: event2_1_id})
- def test_get_last_receipt_event_id_for_user(self):
+ def test_get_last_receipt_event_id_for_user(self) -> None:
# Send some events into the first room
event1_1_id = self.create_and_send_event(
self.room_id1, UserID.from_string(OTHER_USER_ID)
@@ -191,53 +234,42 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
# Test we get the latest event when we want both private and public receipts
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
- self.room_id1,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
- )
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
)
self.assertEqual(res, event1_2_id)
# Test we get the older event when we want only public receipt
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ]
- )
- )
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ])
self.assertEqual(res, event1_1_id)
# Test we get the latest event when we want only the private receipt
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE]
- )
- )
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ_PRIVATE])
self.assertEqual(res, event1_2_id)
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
- )
- )
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID, self.room_id1, [ReceiptTypes.READ]
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
+ res = self.get_last_unthreaded_receipt([ReceiptTypes.READ])
self.assertEqual(res, event1_2_id)
# Send some events into the second room
@@ -248,14 +280,15 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
- )
- )
- res = self.get_success(
- self.store.get_last_receipt_event_id_for_user(
- OUR_USER_ID,
self.room_id2,
- [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE],
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
+ res = self.get_last_unthreaded_receipt(
+ [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], room_id=self.room_id2
+ )
self.assertEqual(res, event2_1_id)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index a49ac1525e..05ea802008 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -11,15 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import UserTypes
from synapse.api.errors import ThreepidValidationError
+from synapse.server import HomeServer
+from synapse.types import JsonDict, UserID
+from synapse.util import Clock
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
class RegistrationStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = "@my-user:test"
@@ -27,7 +31,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
self.pwhash = "{xx1}123456789"
self.device_id = "akgjhdjklgshg"
- def test_register(self):
+ def test_register(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.assertEqual(
@@ -38,17 +42,32 @@ class RegistrationStoreTestCase(HomeserverTestCase):
"admin": 0,
"is_guest": 0,
"consent_version": None,
+ "consent_ts": None,
"consent_server_notice_sent": None,
"appservice_id": None,
"creation_ts": 0,
"user_type": None,
"deactivated": 0,
"shadow_banned": 0,
+ "approved": 1,
},
(self.get_success(self.store.get_user_by_id(self.user_id))),
)
- def test_add_tokens(self):
+ def test_consent(self) -> None:
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+ before_consent = self.clock.time_msec()
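+ # Advance the reactor so that consent_ts falls strictly between before_consent and the assertion time.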
+ self.reactor.advance(5)
+ self.get_success(self.store.user_set_consent_version(self.user_id, "1"))
+ self.reactor.advance(5)
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user
+ self.assertEqual(user["consent_version"], "1")
+ self.assertGreater(user["consent_ts"], before_consent)
+ self.assertLess(user["consent_ts"], self.clock.time_msec())
+
+ def test_add_tokens(self) -> None:
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.get_success(
self.store.add_access_token_to_user(
@@ -58,11 +77,12 @@ class RegistrationStoreTestCase(HomeserverTestCase):
result = self.get_success(self.store.get_user_by_access_token(self.tokens[1]))
+ assert result
self.assertEqual(result.user_id, self.user_id)
self.assertEqual(result.device_id, self.device_id)
self.assertIsNotNone(result.token_id)
- def test_user_delete_access_tokens(self):
+ def test_user_delete_access_tokens(self) -> None:
# add some tokens
self.get_success(self.store.register_user(self.user_id, self.pwhash))
self.get_success(
@@ -87,6 +107,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
# check the one not associated with the device was not deleted
user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
+ assert user
self.assertEqual(self.user_id, user.user_id)
# now delete the rest
@@ -95,11 +116,11 @@ class RegistrationStoreTestCase(HomeserverTestCase):
user = self.get_success(self.store.get_user_by_access_token(self.tokens[0]))
self.assertIsNone(user, "access token was not deleted without device_id")
- def test_is_support_user(self):
+ def test_is_support_user(self) -> None:
TEST_USER = "@test:test"
SUPPORT_USER = "@support:test"
- res = self.get_success(self.store.is_support_user(None))
+ res = self.get_success(self.store.is_support_user(None)) # type: ignore[arg-type]
self.assertFalse(res)
self.get_success(
self.store.register_user(user_id=TEST_USER, password_hash=None)
@@ -115,7 +136,7 @@ class RegistrationStoreTestCase(HomeserverTestCase):
res = self.get_success(self.store.is_support_user(SUPPORT_USER))
self.assertTrue(res)
- def test_3pid_inhibit_invalid_validation_session_error(self):
+ def test_3pid_inhibit_invalid_validation_session_error(self) -> None:
"""Tests that enabling the configuration option to inhibit 3PID errors on
/requestToken also inhibits validation errors caused by an unknown session ID.
"""
@@ -147,3 +168,101 @@ class RegistrationStoreTestCase(HomeserverTestCase):
ThreepidValidationError,
)
self.assertEqual(e.value.msg, "Validation token not found or has expired", e)
+
+
+class ApprovalRequiredRegistrationTestCase(HomeserverTestCase):
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+
+ # If there's already some config for this feature in the default config, it
+ # means we're overriding it with @override_config. In this case we don't want
+ # to do anything more with it.
+ msc3866_config = config.get("experimental_features", {}).get("msc3866")
+ if msc3866_config is not None:
+ return config
+
+ # Require approval for all new accounts.
+ config["experimental_features"] = {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": True,
+ }
+ }
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.user_id = "@my-user:test"
+ self.pwhash = "{xx1}123456789"
+
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3866": {
+ "enabled": True,
+ "require_approval_for_new_accounts": False,
+ }
+ }
+ }
+ )
+ def test_approval_not_required(self) -> None:
+ """Tests that if we don't require approval for new accounts, newly created
+ accounts are automatically marked as approved.
+ """
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user is not None
+ self.assertTrue(user["approved"])
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
+
+ def test_approval_required(self) -> None:
+ """Tests that if we require approval for new accounts, newly created accounts
+ are not automatically marked as approved.
+ """
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ assert user is not None
+ self.assertFalse(user["approved"])
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertFalse(approved)
+
+ def test_override(self) -> None:
+ """Tests that if we require approval for new accounts, but we explicitly say the
+ new user should be considered approved, they're marked as approved.
+ """
+ self.get_success(
+ self.store.register_user(
+ self.user_id,
+ self.pwhash,
+ approved=True,
+ )
+ )
+
+ user = self.get_success(self.store.get_user_by_id(self.user_id))
+ self.assertIsNotNone(user)
+ assert user is not None
+ self.assertEqual(user["approved"], 1)
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
+
+ def test_approve_user(self) -> None:
+ """Tests that approving the user updates their approval status."""
+ self.get_success(self.store.register_user(self.user_id, self.pwhash))
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertFalse(approved)
+
+ self.get_success(
+ self.store.update_user_approval_status(
+ UserID.from_string(self.user_id), True
+ )
+ )
+
+ approved = self.get_success(self.store.is_user_approved(self.user_id))
+ self.assertTrue(approved)
diff --git a/tests/storage/test_relations.py b/tests/storage/test_relations.py
new file mode 100644
index 0000000000..cd1d00208b
--- /dev/null
+++ b/tests/storage/test_relations.py
@@ -0,0 +1,111 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import MAIN_TIMELINE
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class RelationsStoreTestCase(unittest.HomeserverTestCase):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ """
+ Creates a DAG:
+
+ A <---[m.thread]-- B <--[m.annotation]-- C
+ ^
+ |--[m.reference]-- D <--[m.annotation]-- E
+
+ F <--[m.annotation]-- G
+
+ """
+ self._main_store = self.hs.get_datastores().main
+
+ self._create_relation("A", "B", "m.thread")
+ self._create_relation("B", "C", "m.annotation")
+ self._create_relation("A", "D", "m.reference")
+ self._create_relation("D", "E", "m.annotation")
+ self._create_relation("F", "G", "m.annotation")
+
+ def _create_relation(self, parent_id: str, event_id: str, rel_type: str) -> None:
+ self.get_success(
+ self._main_store.db_pool.simple_insert(
+ table="event_relations",
+ values={
+ "event_id": event_id,
+ "relates_to_id": parent_id,
+ "relation_type": rel_type,
+ },
+ )
+ )
+
+ def test_get_thread_id(self) -> None:
+ """
+ Ensure that get_thread_id only searches up the tree for threads.
+ """
+ # The thread itself and children of it return the thread.
+ thread_id = self.get_success(self._main_store.get_thread_id("B"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("C"))
+ self.assertEqual("A", thread_id)
+
+ # But the root and events related to the root do not.
+ thread_id = self.get_success(self._main_store.get_thread_id("A"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("D"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("E"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ # Events which are not related to a thread at all should return the
+ # main timeline.
+ thread_id = self.get_success(self._main_store.get_thread_id("F"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("G"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ def test_get_thread_id_for_receipts(self) -> None:
+ """
+ Ensure that get_thread_id_for_receipts searches up and down the tree for a thread.
+ """
+ # All of the events are considered related to this thread.
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("A"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("B"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("C"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("D"))
+ self.assertEqual("A", thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id_for_receipts("E"))
+ self.assertEqual("A", thread_id)
+
+ # Events which are not related to a thread at all should return the
+ # main timeline.
+ thread_id = self.get_success(self._main_store.get_thread_id("F"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
+
+ thread_id = self.get_success(self._main_store.get_thread_id("G"))
+ self.assertEqual(MAIN_TIMELINE, thread_id)
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
index e747c6b50e..ef850daa73 100644
--- a/tests/storage/test_room_search.py
+++ b/tests/storage/test_room_search.py
@@ -12,11 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Tuple
+from unittest.case import SkipTest
+
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main import DataStore
+from synapse.storage.databases.main.search import Phrase, SearchToken, _tokenize_query
from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines.sqlite import Sqlite3Engine
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase, skip_unless
from tests.utils import USE_POSTGRES_FOR_TESTS
@@ -187,3 +197,179 @@ class EventSearchInsertionTest(HomeserverTestCase):
),
)
self.assertCountEqual(values, ["hi", "2"])
+
+
+class MessageSearchTest(HomeserverTestCase):
+ """
+ Check message search.
+
+ A powerful way to check the behaviour is to run the following in Postgres >= 11:
+
+ # SELECT websearch_to_tsquery('english', <your string>);
+
+ The result can be compared to the tokenized version for SQLite and Postgres < 11.
+
+ """
+
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ PHRASE = "the quick brown fox jumps over the lazy dog"
+
+ # Each entry is a search query, followed by a boolean indicating whether the query should match the phrase.
+ COMMON_CASES = [
+ ("nope", False),
+ ("brown", True),
+ ("quick brown", True),
+ ("brown quick", True),
+ ("quick \t brown", True),
+ ("jump", True),
+ ("brown nope", False),
+ ('"brown quick"', False),
+ ('"jumps over"', True),
+ ('"quick fox"', False),
+ ("nope OR doublenope", False),
+ ("furphy OR fox", True),
+ ("fox -nope", True),
+ ("fox -brown", False),
+ ('"fox" quick', True),
+ ('"quick brown', True),
+ ('" quick "', True),
+ ('" nope"', False),
+ ]
+ # TODO Test non-ASCII cases.
+
+ # Cases that fail on SQLite.
+ POSTGRES_CASES = [
+ # SQLite treats NOT as a binary operator.
+ ("- fox", False),
+ ("- nope", True),
+ ('"-fox quick', False),
+ # PostgreSQL skips stop words.
+ ('"the quick brown"', True),
+ ('"over lazy"', True),
+ ]
+
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ # Register a user and create a room.
+ self.register_user("alice", "password")
+ self.access_token = self.login("alice", "password")
+ self.room_id = self.helper.create_room_as("alice", tok=self.access_token)
+
+ # Send the phrase as a message and check it was created
+ response = self.helper.send(self.room_id, self.PHRASE, tok=self.access_token)
+ self.assertIn("event_id", response)
+
+ # The behaviour of a missing trailing double quote changed in PostgreSQL 14
+ # from ignoring the initial double quote to treating it as a phrase.
+ main_store = homeserver.get_datastores().main
+ found = False
+ if isinstance(main_store.database_engine, PostgresEngine):
+ assert main_store.database_engine._version is not None
+ found = main_store.database_engine._version < 140000
+ self.COMMON_CASES.append(('"fox quick', found))
+
+ def test_tokenize_query(self) -> None:
+ """Test the custom logic to tokenize a user's query."""
+ cases = (
+ ("brown", ["brown"]),
+ ("quick brown", ["quick", SearchToken.And, "brown"]),
+ ("quick \t brown", ["quick", SearchToken.And, "brown"]),
+ ('"brown quick"', [Phrase(["brown", "quick"])]),
+ ("furphy OR fox", ["furphy", SearchToken.Or, "fox"]),
+ ("fox -brown", ["fox", SearchToken.Not, "brown"]),
+ ("- fox", [SearchToken.Not, "fox"]),
+ ('"fox" quick', [Phrase(["fox"]), SearchToken.And, "quick"]),
+ # No trailing double quote.
+ ('"fox quick', [Phrase(["fox", "quick"])]),
+ ('"-fox quick', [Phrase(["-fox", "quick"])]),
+ ('" quick "', [Phrase(["quick"])]),
+ (
+ 'q"uick brow"n',
+ [
+ "q",
+ SearchToken.And,
+ Phrase(["uick", "brow"]),
+ SearchToken.And,
+ "n",
+ ],
+ ),
+ (
+ '-"quick brown"',
+ [SearchToken.Not, Phrase(["quick", "brown"])],
+ ),
+ )
+
+ for query, expected in cases:
+ tokenized = _tokenize_query(query)
+ self.assertEqual(
+ tokenized, expected, f"{tokenized} != {expected} for {query}"
+ )
+
+ def _check_test_cases(
+ self, store: DataStore, cases: List[Tuple[str, bool]]
+ ) -> None:
+ # Run all the test cases versus search_msgs
+ for query, expect_to_contain in cases:
+ result = self.get_success(
+ store.search_msgs([self.room_id], query, ["content.body"])
+ )
+ self.assertEqual(
+ result["count"],
+ 1 if expect_to_contain else 0,
+ f"expected '{query}' to match '{self.PHRASE}'"
+ if expect_to_contain
+ else f"'{query}' unexpectedly matched '{self.PHRASE}'",
+ )
+ self.assertEqual(
+ len(result["results"]),
+ 1 if expect_to_contain else 0,
+ "results array length should match count",
+ )
+
+ # Run them again versus search_rooms
+ for query, expect_to_contain in cases:
+ result = self.get_success(
+ store.search_rooms([self.room_id], query, ["content.body"], 10)
+ )
+ self.assertEqual(
+ result["count"],
+ 1 if expect_to_contain else 0,
+ f"expected '{query}' to match '{self.PHRASE}'"
+ if expect_to_contain
+ else f"'{query}' unexpectedly matched '{self.PHRASE}'",
+ )
+ self.assertEqual(
+ len(result["results"]),
+ 1 if expect_to_contain else 0,
+ "results array length should match count",
+ )
+
+ def test_postgres_web_search_for_phrase(self) -> None:
+ """
+ Test searching for phrases using typical web search syntax, as per postgres' websearch_to_tsquery.
+ This test is skipped unless the postgres instance supports websearch_to_tsquery.
+
+ See https://www.postgresql.org/docs/current/textsearch-controls.html
+ """
+
+ store = self.hs.get_datastores().main
+ if not isinstance(store.database_engine, PostgresEngine):
+ raise SkipTest("Test only applies when postgres is used as the database")
+
+ self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES)
+
+ def test_sqlite_search(self) -> None:
+ """
+ Test sqlite searching for phrases.
+ """
+ store = self.hs.get_datastores().main
+ if not isinstance(store.database_engine, Sqlite3Engine):
+ raise SkipTest("Test only applies when sqlite is used as the database")
+
+ self._check_test_cases(store, self.COMMON_CASES)
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 240b02cb9f..8794401823 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -23,6 +23,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
+from tests.test_utils import event_injection
class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
@@ -157,6 +158,75 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
# Check that alice's display name is now None
self.assertEqual(row[0]["display_name"], None)
+ def test_room_is_locally_forgotten(self) -> None:
+ """Test that when the last local user has forgotten a room it is known as forgotten."""
+ # join two local and one remote user
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.get_success(
+ event_injection.inject_member_event(self.hs, self.room, self.u_bob, "join")
+ )
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_charlie.to_string(), "join"
+ )
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # local users leave the room and the room is not forgotten
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "leave"
+ )
+ )
+ self.get_success(
+ event_injection.inject_member_event(self.hs, self.room, self.u_bob, "leave")
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # first user forgets the room, room is not forgotten
+ self.get_success(self.store.forget(self.u_alice, self.room))
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # second (last local) user forgets the room and the room is forgotten
+ self.get_success(self.store.forget(self.u_bob, self.room))
+ self.assertTrue(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ def test_join_locally_forgotten_room(self) -> None:
+ """Tests if a user joins a forgotten room the room is not forgotten anymore."""
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # after leaving and forgetting the room, it is forgotten
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "leave"
+ )
+ )
+ self.get_success(self.store.forget(self.u_alice, self.room))
+ self.assertTrue(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
+ # after rejoining, the room is not forgotten anymore
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs, self.room, self.u_alice, "join"
+ )
+ )
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room))
+ )
+
class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 78663a53fe..34fa810cf6 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -16,7 +16,6 @@ from typing import List
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.filtering import Filter
-from synapse.events import EventBase
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.types import JsonDict
@@ -40,7 +39,7 @@ class PaginationTestCase(HomeserverTestCase):
def default_config(self):
config = super().default_config()
- config["experimental_features"] = {"msc3440_enabled": True}
+ config["experimental_features"] = {"msc3874_enabled": True}
return config
def prepare(self, reactor, clock, homeserver):
@@ -58,6 +57,11 @@ class PaginationTestCase(HomeserverTestCase):
self.third_tok = self.login("third", "test")
self.helper.join(room=self.room_id, user=self.third_user_id, tok=self.third_tok)
+ # Store a token which is after all the room creation events.
+ self.from_token = self.get_success(
+ self.hs.get_event_sources().get_current_token_for_pagination(self.room_id)
+ )
+
# An initial event with a relation from second user.
res = self.helper.send_event(
room_id=self.room_id,
@@ -66,7 +70,7 @@ class PaginationTestCase(HomeserverTestCase):
tok=self.tok,
)
self.event_id_1 = res["event_id"]
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type="m.reaction",
content={
@@ -78,6 +82,7 @@ class PaginationTestCase(HomeserverTestCase):
},
tok=self.second_tok,
)
+ self.event_id_annotation = res["event_id"]
# Another event with a relation from third user.
res = self.helper.send_event(
@@ -87,7 +92,7 @@ class PaginationTestCase(HomeserverTestCase):
tok=self.tok,
)
self.event_id_2 = res["event_id"]
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type="m.reaction",
content={
@@ -98,68 +103,59 @@ class PaginationTestCase(HomeserverTestCase):
},
tok=self.third_tok,
)
+ self.event_id_reference = res["event_id"]
# An event with no relations.
- self.helper.send_event(
+ res = self.helper.send_event(
room_id=self.room_id,
type=EventTypes.Message,
content={"msgtype": "m.text", "body": "No relations"},
tok=self.tok,
)
+ self.event_id_none = res["event_id"]
- def _filter_messages(self, filter: JsonDict) -> List[EventBase]:
+ def _filter_messages(self, filter: JsonDict) -> List[str]:
"""Make a request to /messages with a filter, returns the chunk of events."""
- from_token = self.get_success(
- self.hs.get_event_sources().get_current_token_for_pagination(self.room_id)
- )
-
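+ # Paginate forwards from the stored token so events are returned in the order they were sent.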
events, next_key = self.get_success(
self.hs.get_datastores().main.paginate_room_events(
room_id=self.room_id,
- from_key=from_token.room_key,
+ from_key=self.from_token.room_key,
to_key=None,
- direction="b",
+ direction="f",
limit=10,
event_filter=Filter(self.hs, filter),
)
)
- return events
+ return [ev.event_id for ev in events]
def test_filter_relation_senders(self):
# Messages which second user reacted to.
filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
# Messages which third user reacted to.
filter = {"related_by_senders": [self.third_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_2)
+ self.assertEqual(chunk, [self.event_id_2])
# Messages which either user reacted to.
filter = {"related_by_senders": [self.second_user_id, self.third_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c.event_id for c in chunk], [self.event_id_1, self.event_id_2]
- )
+ self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
def test_filter_relation_type(self):
# Messages which have annotations.
filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
# Messages which have references.
filter = {"related_by_rel_types": [RelationTypes.REFERENCE]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_2)
+ self.assertEqual(chunk, [self.event_id_2])
# Messages which have either annotations or references.
filter = {
@@ -169,10 +165,7 @@ class PaginationTestCase(HomeserverTestCase):
]
}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 2, chunk)
- self.assertCountEqual(
- [c.event_id for c in chunk], [self.event_id_1, self.event_id_2]
- )
+ self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
def test_filter_relation_senders_and_type(self):
# Messages which second user reacted to.
@@ -181,8 +174,7 @@ class PaginationTestCase(HomeserverTestCase):
"related_by_rel_types": [RelationTypes.ANNOTATION],
}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
def test_duplicate_relation(self):
"""An event should only be returned once if there are multiple relations to it."""
@@ -201,5 +193,65 @@ class PaginationTestCase(HomeserverTestCase):
filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
- self.assertEqual(len(chunk), 1, chunk)
- self.assertEqual(chunk[0].event_id, self.event_id_1)
+ self.assertEqual(chunk, [self.event_id_1])
+
+ def test_filter_rel_types(self) -> None:
+ # Messages which are annotations.
+ filter = {"org.matrix.msc3874.rel_types": [RelationTypes.ANNOTATION]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_annotation])
+
+ # Messages which are references.
+ filter = {"org.matrix.msc3874.rel_types": [RelationTypes.REFERENCE]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_reference])
+
+ # Messages which are either annotations or references.
+ filter = {
+ "org.matrix.msc3874.rel_types": [
+ RelationTypes.ANNOTATION,
+ RelationTypes.REFERENCE,
+ ]
+ }
+ chunk = self._filter_messages(filter)
+ self.assertCountEqual(
+ chunk,
+ [self.event_id_annotation, self.event_id_reference],
+ )
+
+ def test_filter_not_rel_types(self) -> None:
+ # Messages which are not annotations.
+ filter = {"org.matrix.msc3874.not_rel_types": [RelationTypes.ANNOTATION]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(
+ chunk,
+ [
+ self.event_id_1,
+ self.event_id_2,
+ self.event_id_reference,
+ self.event_id_none,
+ ],
+ )
+
+ # Messages which are not references.
+ filter = {"org.matrix.msc3874.not_rel_types": [RelationTypes.REFERENCE]}
+ chunk = self._filter_messages(filter)
+ self.assertEqual(
+ chunk,
+ [
+ self.event_id_1,
+ self.event_id_annotation,
+ self.event_id_2,
+ self.event_id_none,
+ ],
+ )
+
+ # Messages which are neither annotations nor references.
+ filter = {
+ "org.matrix.msc3874.not_rel_types": [
+ RelationTypes.ANNOTATION,
+ RelationTypes.REFERENCE,
+ ]
+ }
+ chunk = self._filter_messages(filter)
+ self.assertEqual(chunk, [self.event_id_1, self.event_id_2, self.event_id_none])
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index e42d7b9ba0..f4d9fba0a1 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -821,7 +821,7 @@ def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase:
def _build_auth_dict_for_room_version(
room_version: RoomVersion, auth_events: Iterable[EventBase]
) -> List:
- if room_version.event_format == EventFormatVersions.V1:
+ if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
return [(e.event_id, "not_used") for e in auth_events]
else:
return [e.event_id for e in auth_events]
@@ -871,7 +871,7 @@ event_count = 0
def _maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict:
"""If this room version needs it, generate an event id"""
- if room_version.event_format != EventFormatVersions.V1:
+ if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
return {}
global event_count
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 779fad1f63..80e5c590d8 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -86,8 +86,8 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
federation_event_handler._check_event_auth = _check_event_auth
self.client = self.homeserver.get_federation_client()
- self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
- pdus
+ self.client._check_sigs_and_hash_for_pulled_events_and_fetch = (
+ lambda dest, pdus, **k: succeed(pdus)
)
# Send the join, it should return None (which is not an error)
diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py
index b01cae6e5d..cc1a98f1c4 100644
--- a/tests/test_phone_home.py
+++ b/tests/test_phone_home.py
@@ -15,8 +15,14 @@
import resource
from unittest import mock
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.app.phone_stats_home import phone_stats_home
+from synapse.rest import admin
+from synapse.rest.client import login, sync
+from synapse.server import HomeServer
from synapse.types import JsonDict
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -47,5 +53,43 @@ class PhoneHomeStatsTestCase(HomeserverTestCase):
stats: JsonDict = {}
self.reactor.advance(1)
# `old_resource` has type `Mock` instead of `struct_rusage`
- self.get_success(phone_stats_home(self.hs, stats, past_stats)) # type: ignore[arg-type]
+ self.get_success(
+ phone_stats_home(self.hs, stats, past_stats) # type: ignore[arg-type]
+ )
self.assertApproximates(stats["cpu_average"], 100, tolerance=2.5)
+
+
+class CommonMetricsTestCase(HomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ sync.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.metrics_manager = hs.get_common_usage_metrics_manager()
+ self.get_success(self.metrics_manager.setup())
+
+ def test_dau(self) -> None:
+ """Tests that the daily active users count is correctly updated."""
+ self._assert_metric_value("daily_active_users", 0)
+
+ self.register_user("user", "password")
+ tok = self.login("user", "password")
+ self.make_request("GET", "/sync", access_token=tok)
+
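+ # The sync request marks the user as active; pump so any pending work completes before checking the metric.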
+ self.pump(1)
+
+ self._assert_metric_value("daily_active_users", 1)
+
+ def _assert_metric_value(self, metric_name: str, expected: int) -> None:
+ """Compare the given value to the current value of the common usage metric with
+ the given name.
+
+ Args:
+ metric_name: The metric to look up.
+ expected: Expected value for this metric.
+ """
+ metrics = self.get_success(self.metrics_manager.get_metrics())
+ value = getattr(metrics, metric_name)
+ self.assertEqual(value, expected)
diff --git a/tests/test_rust.py b/tests/test_rust.py
new file mode 100644
index 0000000000..55d8b6b28c
--- /dev/null
+++ b/tests/test_rust.py
@@ -0,0 +1,11 @@
+from synapse.synapse_rust import sum_as_string
+
+from tests import unittest
+
+
+class RustTestCase(unittest.TestCase):
+ """Basic tests to ensure that we can call into Rust code."""
+
+ def test_basic(self):
+ result = sum_as_string(1, 2)
+ self.assertEqual("3", result)
diff --git a/tests/test_server.py b/tests/test_server.py
index 2fe4411401..2d9a0257d4 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -26,12 +26,12 @@ from synapse.http.server import (
DirectServeJsonResource,
JsonResource,
OptionsResource,
- cancellable,
)
from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict
from synapse.util import Clock
+from synapse.util.cancellation import cancellable
from tests import unittest
from tests.http.server._base import test_disconnect
@@ -104,7 +104,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
)
- self.assertEqual(channel.result["code"], b"500")
+ self.assertEqual(channel.code, 500)
def test_callback_indirect_exception(self) -> None:
"""
@@ -130,7 +130,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
)
- self.assertEqual(channel.result["code"], b"500")
+ self.assertEqual(channel.code, 500)
def test_callback_synapseerror(self) -> None:
"""
@@ -150,7 +150,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foo"
)
- self.assertEqual(channel.result["code"], b"403")
+ self.assertEqual(channel.code, 403)
self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
@@ -174,7 +174,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foobar"
)
- self.assertEqual(channel.result["code"], b"400")
+ self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["error"], "Unrecognized request")
self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
@@ -203,7 +203,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/_matrix/foo"
)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
self.assertNotIn("body", channel.result)
@@ -222,13 +222,22 @@ class OptionsResourceTests(unittest.TestCase):
self.resource = OptionsResource()
self.resource.putChild(b"res", DummyResource())
- def _make_request(self, method: bytes, path: bytes) -> FakeChannel:
+ def _make_request(
+ self, method: bytes, path: bytes, experimental_cors_msc3886: bool = False
+ ) -> FakeChannel:
"""Create a request from the method/path and return a channel with the response."""
# Create a site and query for the resource.
site = SynapseSite(
"test",
"site_tag",
- parse_listener_def({"type": "http", "port": 0}),
+ parse_listener_def(
+ 0,
+ {
+ "type": "http",
+ "port": 0,
+ "experimental_cors_msc3886": experimental_cors_msc3886,
+ },
+ ),
self.resource,
"1.0",
max_request_body_size=4096,
@@ -239,55 +248,86 @@ class OptionsResourceTests(unittest.TestCase):
channel = make_request(self.reactor, site, method, path, shorthand=False)
return channel
+ def _check_cors_standard_headers(self, channel: FakeChannel) -> None:
+ # Ensure the correct CORS headers have been added
+ # as per https://spec.matrix.org/v1.4/client-server-api/#web-browser-clients
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Origin"),
+ [b"*"],
+ "has correct CORS Origin header",
+ )
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Methods"),
+ [b"GET, HEAD, POST, PUT, DELETE, OPTIONS"], # HEAD isn't in the spec
+ "has correct CORS Methods header",
+ )
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Headers"),
+ [b"X-Requested-With, Content-Type, Authorization, Date"],
+ "has correct CORS Headers header",
+ )
+
+ def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None:
+ # Ensure the correct CORS headers have been added
+ # as per https://github.com/matrix-org/matrix-spec-proposals/blob/hughns/simple-rendezvous-capability/proposals/3886-simple-rendezvous-capability.md#cors
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Origin"),
+ [b"*"],
+ "has correct CORS Origin header",
+ )
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Methods"),
+ [b"GET, HEAD, POST, PUT, DELETE, OPTIONS"], # HEAD isn't in the spec
+ "has correct CORS Methods header",
+ )
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Allow-Headers"),
+ [
+ b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match"
+ ],
+ "has correct CORS Headers header",
+ )
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"),
+ [b"ETag, Location, X-Max-Bytes"],
+ "has correct CORS Expose Headers header",
+ )
+
def test_unknown_options_request(self) -> None:
"""An OPTIONS requests to an unknown URL still returns 204 No Content."""
channel = self._make_request(b"OPTIONS", b"/foo/")
- self.assertEqual(channel.result["code"], b"204")
+ self.assertEqual(channel.code, 204)
self.assertNotIn("body", channel.result)
- # Ensure the correct CORS headers have been added
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Origin"),
- "has CORS Origin header",
- )
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Methods"),
- "has CORS Methods header",
- )
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Headers"),
- "has CORS Headers header",
- )
+ self._check_cors_standard_headers(channel)
def test_known_options_request(self) -> None:
"""An OPTIONS requests to an known URL still returns 204 No Content."""
channel = self._make_request(b"OPTIONS", b"/res/")
- self.assertEqual(channel.result["code"], b"204")
+ self.assertEqual(channel.code, 204)
self.assertNotIn("body", channel.result)
- # Ensure the correct CORS headers have been added
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Origin"),
- "has CORS Origin header",
- )
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Methods"),
- "has CORS Methods header",
- )
- self.assertTrue(
- channel.headers.hasHeader(b"Access-Control-Allow-Headers"),
- "has CORS Headers header",
+ self._check_cors_standard_headers(channel)
+
+ def test_known_options_request_msc3886(self) -> None:
+ """An OPTIONS requests to an known URL still returns 204 No Content."""
+ channel = self._make_request(
+ b"OPTIONS", b"/res/", experimental_cors_msc3886=True
)
+ self.assertEqual(channel.code, 204)
+ self.assertNotIn("body", channel.result)
+
+ self._check_cors_msc3886_headers(channel)
def test_unknown_request(self) -> None:
"""A non-OPTIONS request to an unknown URL should 404."""
channel = self._make_request(b"GET", b"/foo/")
- self.assertEqual(channel.result["code"], b"404")
+ self.assertEqual(channel.code, 404)
def test_known_request(self) -> None:
"""A non-OPTIONS request to an known URL should query the proper resource."""
channel = self._make_request(b"GET", b"/res/")
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
self.assertEqual(channel.result["body"], b"/res/")
@@ -314,7 +354,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
body = channel.result["body"]
self.assertEqual(body, b"response")
@@ -334,7 +374,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
)
- self.assertEqual(channel.result["code"], b"301")
+ self.assertEqual(channel.code, 301)
headers = channel.result["headers"]
location_headers = [v for k, v in headers if k == b"Location"]
self.assertEqual(location_headers, [b"/look/an/eagle"])
@@ -357,7 +397,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/path"
)
- self.assertEqual(channel.result["code"], b"304")
+ self.assertEqual(channel.code, 304)
headers = channel.result["headers"]
location_headers = [v for k, v in headers if k == b"Location"]
self.assertEqual(location_headers, [b"/no/over/there"])
@@ -378,7 +418,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"HEAD", b"/path"
)
- self.assertEqual(channel.result["code"], b"200")
+ self.assertEqual(channel.code, 200)
self.assertNotIn("body", channel.result)
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
index d3c13cf14c..abd7459a8c 100644
--- a/tests/test_terms_auth.py
+++ b/tests/test_terms_auth.py
@@ -53,7 +53,7 @@ class TermsTestCase(unittest.HomeserverTestCase):
request_data = {"username": "kermit", "password": "monkey"}
channel = self.make_request(b"POST", self.url, request_data)
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
self.assertTrue(channel.json_body is not None)
self.assertIsInstance(channel.json_body["session"], str)
@@ -96,7 +96,7 @@ class TermsTestCase(unittest.HomeserverTestCase):
# We don't bother checking that the response is correct - we'll leave that to
# other tests. We just want to make sure we're on the right path.
- self.assertEqual(channel.result["code"], b"401", channel.result)
+ self.assertEqual(channel.code, 401, channel.result)
# Finish the UI auth for terms
request_data = {
@@ -112,7 +112,7 @@ class TermsTestCase(unittest.HomeserverTestCase):
# We're interested in getting a response that looks like a successful
# registration, not so much that the details are exactly what we want.
- self.assertEqual(channel.result["code"], b"200", channel.result)
+ self.assertEqual(channel.code, 200, channel.result)
self.assertTrue(channel.json_body is not None)
self.assertIsInstance(channel.json_body["user_id"], str)
diff --git a/tests/test_types.py b/tests/test_types.py
index d8d82a517e..1111169384 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -13,11 +13,35 @@
# limitations under the License.
from synapse.api.errors import SynapseError
-from synapse.types import RoomAlias, UserID, map_username_to_mxid_localpart
+from synapse.types import (
+ RoomAlias,
+ UserID,
+ get_domain_from_id,
+ get_localpart_from_id,
+ map_username_to_mxid_localpart,
+)
from tests import unittest
+class IsMineIDTests(unittest.HomeserverTestCase):
+ def test_is_mine_id(self) -> None:
+ self.assertTrue(self.hs.is_mine_id("@user:test"))
+ self.assertTrue(self.hs.is_mine_id("#room:test"))
+ self.assertTrue(self.hs.is_mine_id("invalid:test"))
+
+ self.assertFalse(self.hs.is_mine_id("@user:test\0"))
+ self.assertFalse(self.hs.is_mine_id("@user"))
+
+ def test_two_colons(self) -> None:
+ """Test handling of IDs containing more than one colon."""
+ # The domain starts after the first colon.
+ # These functions must interpret things consistently.
+ self.assertFalse(self.hs.is_mine_id("@user:test:test"))
+ self.assertEqual("user", get_localpart_from_id("@user:test:test"))
+ self.assertEqual("test:test", get_domain_from_id("@user:test:test"))
+
+
class UserIDTestCase(unittest.HomeserverTestCase):
def test_parse(self):
user = UserID.from_string("@1234abcd:test")
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index 0d0d6faf0d..e62ebcc6a5 100644
--- a/tests/test_utils/__init__.py
+++ b/tests/test_utils/__init__.py
@@ -15,17 +15,24 @@
"""
Utilities for running the unit tests
"""
+import json
import sys
import warnings
from asyncio import Future
from binascii import unhexlify
-from typing import Awaitable, Callable, TypeVar
+from typing import Awaitable, Callable, Tuple, TypeVar
from unittest.mock import Mock
import attr
+import zope.interface
from twisted.python.failure import Failure
from twisted.web.client import ResponseDone
+from twisted.web.http import RESPONSES
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IResponse
+
+from synapse.types import JsonDict
TV = TypeVar("TV")
@@ -97,27 +104,44 @@ def simple_async_mock(return_value=None, raises=None) -> Mock:
return Mock(side_effect=cb)
-@attr.s
-class FakeResponse:
+# Type ignore: it does not fully implement IResponse, but is good enough for tests
+@zope.interface.implementer(IResponse)
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class FakeResponse: # type: ignore[misc]
"""A fake twisted.web.IResponse object
there is a similar class at treq.test.test_response, but it lacks a `phrase`
attribute, and didn't support deliverBody until recently.
"""
- # HTTP response code
- code = attr.ib(type=int)
+ version: Tuple[bytes, int, int] = (b"HTTP", 1, 1)
- # HTTP response phrase (eg b'OK' for a 200)
- phrase = attr.ib(type=bytes)
+ # HTTP response code
+ code: int = 200
# body of the response
- body = attr.ib(type=bytes)
+ body: bytes = b""
+
+ headers: Headers = attr.Factory(Headers)
+
+ @property
+ def phrase(self):
+ return RESPONSES.get(self.code, b"Unknown Status")
+
+ @property
+ def length(self):
+ return len(self.body)
def deliverBody(self, protocol):
protocol.dataReceived(self.body)
protocol.connectionLost(Failure(ResponseDone()))
+ @classmethod
+ def json(cls, *, code: int = 200, payload: JsonDict) -> "FakeResponse":
+ headers = Headers({"Content-Type": ["application/json"]})
+ body = json.dumps(payload).encode("utf-8")
+ return cls(code=code, body=body, headers=headers)
+
# A small image used in some tests.
#
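A minimal usage sketch of the reworked FakeResponse above, assuming a test that just
needs an IResponse-shaped object (the payload shown is illustrative):

    from tests.test_utils import FakeResponse

    # json() encodes the payload and sets a JSON Content-Type header.
    response = FakeResponse.json(code=200, payload={"result": True})
    assert response.phrase == b"OK"                # derived from the code via RESPONSES
    assert response.length == len(response.body)   # length is computed from the body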
diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py
new file mode 100644
index 0000000000..1461d23ee8
--- /dev/null
+++ b/tests/test_utils/oidc.py
@@ -0,0 +1,348 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+from typing import Any, Dict, List, Optional, Tuple
+from unittest.mock import Mock, patch
+from urllib.parse import parse_qs
+
+import attr
+
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IResponse
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+from synapse.util.stringutils import random_string
+
+from tests.test_utils import FakeResponse
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class FakeAuthorizationGrant:
+ userinfo: dict
+ client_id: str
+ redirect_uri: str
+ scope: str
+ nonce: Optional[str]
+ sid: Optional[str]
+
+
+class FakeOidcServer:
+ """A fake OpenID Connect Provider."""
+
+ # All methods here are mocks, so we can track when they are called, and override
+ # their values
+ request: Mock
+ get_jwks_handler: Mock
+ get_metadata_handler: Mock
+ get_userinfo_handler: Mock
+ post_token_handler: Mock
+
+ sid_counter: int = 0
+
+ def __init__(self, clock: Clock, issuer: str):
+ from authlib.jose import ECKey, KeySet
+
+ self._clock = clock
+ self.issuer = issuer
+
+ self.request = Mock(side_effect=self._request)
+ self.get_jwks_handler = Mock(side_effect=self._get_jwks_handler)
+ self.get_metadata_handler = Mock(side_effect=self._get_metadata_handler)
+ self.get_userinfo_handler = Mock(side_effect=self._get_userinfo_handler)
+ self.post_token_handler = Mock(side_effect=self._post_token_handler)
+
+ # A code -> grant mapping
+ self._authorization_grants: Dict[str, FakeAuthorizationGrant] = {}
+ # An access token -> grant mapping
+ self._sessions: Dict[str, FakeAuthorizationGrant] = {}
+
+ # Here we generate an ECDSA key with the P-256 curve (ES256 algorithm), used for
+ # signing JWTs. ECDSA keys are really quick to generate compared to RSA.
+ self._key = ECKey.generate_key(crv="P-256", is_private=True)
+ self._jwks = KeySet([ECKey.import_key(self._key.as_pem(is_private=False))])
+
+ self._id_token_overrides: Dict[str, Any] = {}
+
+ def reset_mocks(self):
+ self.request.reset_mock()
+ self.get_jwks_handler.reset_mock()
+ self.get_metadata_handler.reset_mock()
+ self.get_userinfo_handler.reset_mock()
+ self.post_token_handler.reset_mock()
+
+ def patch_homeserver(self, hs: HomeServer):
+ """Patch the ``HomeServer`` HTTP client to handle requests through the ``FakeOidcServer``.
+
+ This patch should be used whenever the HS is expected to perform requests to the
+ OIDC provider, e.g.::
+
+ fake_oidc_server = self.helper.fake_oidc_server()
+ with fake_oidc_server.patch_homeserver(hs):
+ self.make_request("GET", "/_matrix/client/r0/login/sso/redirect")
+ """
+ return patch.object(hs.get_proxied_http_client(), "request", self.request)
+
+ @property
+ def authorization_endpoint(self) -> str:
+ return self.issuer + "authorize"
+
+ @property
+ def token_endpoint(self) -> str:
+ return self.issuer + "token"
+
+ @property
+ def userinfo_endpoint(self) -> str:
+ return self.issuer + "userinfo"
+
+ @property
+ def metadata_endpoint(self) -> str:
+ return self.issuer + ".well-known/openid-configuration"
+
+ @property
+ def jwks_uri(self) -> str:
+ return self.issuer + "jwks"
+
+ def get_metadata(self) -> dict:
+ return {
+ "issuer": self.issuer,
+ "authorization_endpoint": self.authorization_endpoint,
+ "token_endpoint": self.token_endpoint,
+ "jwks_uri": self.jwks_uri,
+ "userinfo_endpoint": self.userinfo_endpoint,
+ "response_types_supported": ["code"],
+ "subject_types_supported": ["public"],
+ "id_token_signing_alg_values_supported": ["ES256"],
+ }
+
+ def get_jwks(self) -> dict:
+ return self._jwks.as_dict()
+
+ def get_userinfo(self, access_token: str) -> Optional[dict]:
+ """Given an access token, get the userinfo of the associated session."""
+ session = self._sessions.get(access_token, None)
+ if session is None:
+ return None
+ return session.userinfo
+
+ def _sign(self, payload: dict) -> str:
+ from authlib.jose import JsonWebSignature
+
+ jws = JsonWebSignature()
+ kid = self.get_jwks()["keys"][0]["kid"]
+ protected = {"alg": "ES256", "kid": kid}
+ json_payload = json.dumps(payload)
+ return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8")
+
+ def generate_id_token(self, grant: FakeAuthorizationGrant) -> str:
+ now = int(self._clock.time())
+ id_token = {
+ **grant.userinfo,
+ "iss": self.issuer,
+ "aud": grant.client_id,
+ "iat": now,
+ "nbf": now,
+ "exp": now + 600,
+ }
+
+ if grant.nonce is not None:
+ id_token["nonce"] = grant.nonce
+
+ if grant.sid is not None:
+ id_token["sid"] = grant.sid
+
+ id_token.update(self._id_token_overrides)
+
+ return self._sign(id_token)
+
+ def generate_logout_token(self, grant: FakeAuthorizationGrant) -> str:
+ now = int(self._clock.time())
+ logout_token = {
+ "iss": self.issuer,
+ "aud": grant.client_id,
+ "iat": now,
+ "jti": random_string(10),
+ "events": {
+ "http://schemas.openid.net/event/backchannel-logout": {},
+ },
+ }
+
+ if grant.sid is not None:
+ logout_token["sid"] = grant.sid
+
+ if "sub" in grant.userinfo:
+ logout_token["sub"] = grant.userinfo["sub"]
+
+ return self._sign(logout_token)
+
+ def id_token_override(self, overrides: dict):
+ """Temporarily patch the ID token generated by the token endpoint."""
+ return patch.object(self, "_id_token_overrides", overrides)
+
+ def start_authorization(
+ self,
+ client_id: str,
+ scope: str,
+ redirect_uri: str,
+ userinfo: dict,
+ nonce: Optional[str] = None,
+ with_sid: bool = False,
+ ) -> Tuple[str, FakeAuthorizationGrant]:
+ """Start an authorization request, and get back the code to use on the authorization endpoint."""
+ code = random_string(10)
+ sid = None
+ if with_sid:
+ sid = str(self.sid_counter)
+ self.sid_counter += 1
+
+ grant = FakeAuthorizationGrant(
+ userinfo=userinfo,
+ scope=scope,
+ redirect_uri=redirect_uri,
+ nonce=nonce,
+ client_id=client_id,
+ sid=sid,
+ )
+ self._authorization_grants[code] = grant
+
+ return code, grant
+
+ def exchange_code(self, code: str) -> Optional[Dict[str, Any]]:
+ grant = self._authorization_grants.pop(code, None)
+ if grant is None:
+ return None
+
+ access_token = random_string(10)
+ self._sessions[access_token] = grant
+
+ token = {
+ "token_type": "Bearer",
+ "access_token": access_token,
+ "expires_in": 3600,
+ "scope": grant.scope,
+ }
+
+ if "openid" in grant.scope:
+ token["id_token"] = self.generate_id_token(grant)
+
+ return dict(token)
+
+ def buggy_endpoint(
+ self,
+ *,
+ jwks: bool = False,
+ metadata: bool = False,
+ token: bool = False,
+ userinfo: bool = False,
+ ):
+ """A context which makes a set of endpoints return a 500 error.
+
+ Args:
+ jwks: If True, makes the JWKS endpoint return a 500 error.
+ metadata: If True, makes the OIDC Discovery endpoint return a 500 error.
+ token: If True, makes the token endpoint return a 500 error.
+ userinfo: If True, makes the userinfo endpoint return a 500 error.
+ """
+ buggy = FakeResponse(code=500, body=b"Internal server error")
+
+ patches = {}
+ if jwks:
+ patches["get_jwks_handler"] = Mock(return_value=buggy)
+ if metadata:
+ patches["get_metadata_handler"] = Mock(return_value=buggy)
+ if token:
+ patches["post_token_handler"] = Mock(return_value=buggy)
+ if userinfo:
+ patches["get_userinfo_handler"] = Mock(return_value=buggy)
+
+ return patch.multiple(self, **patches)
+
+ async def _request(
+ self,
+ method: str,
+ uri: str,
+ data: Optional[bytes] = None,
+ headers: Optional[Headers] = None,
+ ) -> IResponse:
+ """The override of the SimpleHttpClient#request() method"""
+ access_token: Optional[str] = None
+
+ if headers is None:
+ headers = Headers()
+
+ # Try to find the access token in the headers if any
+ auth_headers = headers.getRawHeaders(b"Authorization")
+ if auth_headers:
+ parts = auth_headers[0].split(b" ")
+ if parts[0] == b"Bearer" and len(parts) == 2:
+ access_token = parts[1].decode("ascii")
+
+ if method == "POST":
+ # If the method is POST, assume it has a url-encoded body
+ if data is None or headers.getRawHeaders(b"Content-Type") != [
+ b"application/x-www-form-urlencoded"
+ ]:
+ return FakeResponse.json(code=400, payload={"error": "invalid_request"})
+
+ params = parse_qs(data.decode("utf-8"))
+
+ if uri == self.token_endpoint:
+ # Even though this endpoint should be protected, this does not check
+ # for client authentication. We're not checking it for simplicity,
+ # and because client authentication is tested in other standalone tests.
+ return self.post_token_handler(params)
+
+ elif method == "GET":
+ if uri == self.jwks_uri:
+ return self.get_jwks_handler()
+ elif uri == self.metadata_endpoint:
+ return self.get_metadata_handler()
+ elif uri == self.userinfo_endpoint:
+ return self.get_userinfo_handler(access_token=access_token)
+
+ return FakeResponse(code=404, body=b"404 not found")
+
+ # Request handlers
+ def _get_jwks_handler(self) -> IResponse:
+ """Handles requests to the JWKS URI."""
+ return FakeResponse.json(payload=self.get_jwks())
+
+ def _get_metadata_handler(self) -> IResponse:
+ """Handles requests to the OIDC well-known document."""
+ return FakeResponse.json(payload=self.get_metadata())
+
+ def _get_userinfo_handler(self, access_token: Optional[str]) -> IResponse:
+ """Handles requests to the userinfo endpoint."""
+ if access_token is None:
+ return FakeResponse(code=401)
+ user_info = self.get_userinfo(access_token)
+ if user_info is None:
+ return FakeResponse(code=401)
+
+ return FakeResponse.json(payload=user_info)
+
+ def _post_token_handler(self, params: Dict[str, List[str]]) -> IResponse:
+ """Handles requests to the token endpoint."""
+ code = params.get("code", [])
+
+ if len(code) != 1:
+ return FakeResponse.json(code=400, payload={"error": "invalid_request"})
+
+ grant = self.exchange_code(code=code[0])
+ if grant is None:
+ return FakeResponse.json(code=400, payload={"error": "invalid_grant"})
+
+ return FakeResponse.json(payload=grant)
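A minimal sketch of the flow the fake provider above models, assuming a test-only
issuer URL and userinfo payload (both illustrative), with `clock` coming from the
test harness:

    fake_server = FakeOidcServer(clock=clock, issuer="https://issuer.test/")

    # Start an authorization: this records a grant and returns the code a client
    # would redeem at the token endpoint.
    code, grant = fake_server.start_authorization(
        client_id="test-client",
        scope="openid",
        redirect_uri="https://client.test/callback",
        userinfo={"sub": "alice"},
        with_sid=True,
    )

    # Redeem the code; an id_token is included because the "openid" scope was requested.
    token = fake_server.exchange_code(code=code)
    assert token is not None and "id_token" in token

    # Individual endpoints can be made to return a 500, e.g. to exercise error handling:
    with fake_server.buggy_endpoint(userinfo=True):
        ...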
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index c385b2f8d4..d0b9ad5454 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -61,7 +61,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "test_server", events_to_filter
+ self._storage_controllers, "test_server", "hs", events_to_filter
)
)
@@ -83,7 +83,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
self.assertEqual(
self.get_success(
filter_events_for_server(
- self._storage_controllers, "remote_hs", [outlier]
+ self._storage_controllers, "remote_hs", "hs", [outlier]
)
),
[outlier],
@@ -94,7 +94,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "remote_hs", [outlier, evt]
+ self._storage_controllers, "remote_hs", "local_hs", [outlier, evt]
)
)
self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
@@ -106,7 +106,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# be redacted)
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "other_server", [outlier, evt]
+ self._storage_controllers, "other_server", "local_hs", [outlier, evt]
)
)
self.assertEqual(filtered[0], outlier)
@@ -141,7 +141,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# ... and the filtering happens.
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "test_server", events_to_filter
+ self._storage_controllers, "test_server", "local_hs", events_to_filter
)
)
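A minimal sketch of the updated call shape exercised above; the meaning of the two
server-name arguments is an assumption based on how the tests pass them (the target
server first, then this homeserver's own name):

    filtered = self.get_success(
        filter_events_for_server(
            self._storage_controllers,
            "remote_server",  # server the events are about to be sent to
            "local_hs",       # this homeserver's name
            events_to_filter,
        )
    )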
diff --git a/tests/unittest.py b/tests/unittest.py
index bec4a3d023..a120c2976c 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -300,47 +300,31 @@ class HomeserverTestCase(TestCase):
if hasattr(self, "user_id"):
if self.hijack_auth:
assert self.helper.auth_user_id is not None
+ token = "some_fake_token"
# We need a valid token ID to satisfy foreign key constraints.
token_id = self.get_success(
self.hs.get_datastores().main.add_access_token_to_user(
self.helper.auth_user_id,
- "some_fake_token",
+ token,
None,
None,
)
)
- async def get_user_by_access_token(
- token: Optional[str] = None, allow_guest: bool = False
- ) -> JsonDict:
- assert self.helper.auth_user_id is not None
- return {
- "user": UserID.from_string(self.helper.auth_user_id),
- "token_id": token_id,
- "is_guest": False,
- }
-
- async def get_user_by_req(
- request: SynapseRequest,
- allow_guest: bool = False,
- allow_expired: bool = False,
- ) -> Requester:
+ # This has to be a function and not just a Mock, because
+ # `self.helper.auth_user_id` is temporarily reassigned in some tests
+ async def get_requester(*args, **kwargs) -> Requester:
assert self.helper.auth_user_id is not None
return create_requester(
- UserID.from_string(self.helper.auth_user_id),
- token_id,
- False,
- False,
- None,
+ user_id=UserID.from_string(self.helper.auth_user_id),
+ access_token_id=token_id,
)
# Type ignore: mypy doesn't like us assigning to methods.
- self.hs.get_auth().get_user_by_req = get_user_by_req # type: ignore[assignment]
- self.hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment]
- self.hs.get_auth().get_access_token_from_request = Mock( # type: ignore[assignment]
- return_value="1234"
- )
+ self.hs.get_auth().get_user_by_req = get_requester # type: ignore[assignment]
+ self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[assignment]
+ self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[assignment]
if self.needs_threadpool:
self.reactor.threadpool = ThreadPool() # type: ignore[assignment]
@@ -376,13 +360,13 @@ class HomeserverTestCase(TestCase):
store.db_pool.updates.do_next_background_update(False), by=0.1
)
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock):
"""
Make and return a homeserver.
Args:
reactor: A Twisted Reactor, or something that pretends to be one.
- clock (synapse.util.Clock): The Clock, associated with the reactor.
+ clock: The Clock, associated with the reactor.
Returns:
A homeserver suitable for testing.
@@ -442,9 +426,8 @@ class HomeserverTestCase(TestCase):
Args:
reactor: A Twisted Reactor, or something that pretends to be one.
- clock (synapse.util.Clock): The Clock, associated with the reactor.
- homeserver (synapse.server.HomeServer): The HomeServer to test
- against.
+ clock: The Clock, associated with the reactor.
+ homeserver: The HomeServer to test against.
Function to optionally be overridden in subclasses.
"""
@@ -468,11 +451,10 @@ class HomeserverTestCase(TestCase):
given content.
Args:
- method (bytes/unicode): The HTTP request method ("verb").
- path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
- escaped UTF-8 & spaces and such).
- content (bytes or dict): The body of the request. JSON-encoded, if
- a dict.
+ method: The HTTP request method ("verb").
+ path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces
+ and such).
+ content (bytes or dict): The body of the request. JSON-encoded, if a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin: if set to not-None, we will add a fake
@@ -677,14 +659,29 @@ class HomeserverTestCase(TestCase):
username: str,
password: str,
device_id: Optional[str] = None,
+ additional_request_fields: Optional[Dict[str, str]] = None,
custom_headers: Optional[Iterable[CustomHeaderType]] = None,
) -> str:
"""
Log in a user, and get an access token. Requires the Login API be registered.
+
+ Args:
+ username: The localpart to assign to the new user.
+ password: The password to assign to the new user.
+ device_id: An optional device ID to assign to the new device created during
+ login.
+ additional_request_fields: A dictionary containing any additional /login
+ request fields and their values.
+ custom_headers: Custom HTTP headers and values to add to the /login request.
+
+ Returns:
+ The access token for the logged-in user.
"""
body = {"type": "m.login.password", "user": username, "password": password}
if device_id:
body["device_id"] = device_id
+ if additional_request_fields:
+ body.update(additional_request_fields)
channel = self.make_request(
"POST",
@@ -735,7 +732,9 @@ class HomeserverTestCase(TestCase):
event.internal_metadata.soft_failed = True
self.get_success(
- event_creator.handle_new_client_event(requester, event, context)
+ event_creator.handle_new_client_event(
+ requester, events_and_context=[(event, context)]
+ )
)
return event.event_id
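A minimal sketch of the new `additional_request_fields` hook on the login helper; the
extra field shown is illustrative only:

    self.register_user("kermit", "monkey")
    tok = self.login(
        "kermit",
        "monkey",
        device_id="ABCDEF",
        # Extra key/value pairs are merged into the /login request body as-is.
        additional_request_fields={"initial_device_display_name": "My test device"},
    )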
diff --git a/tests/util/caches/test_cached_call.py b/tests/util/caches/test_cached_call.py
index 80b97167ba..9266f12590 100644
--- a/tests/util/caches/test_cached_call.py
+++ b/tests/util/caches/test_cached_call.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import NoReturn
from unittest.mock import Mock
from twisted.internet import defer
@@ -23,14 +24,14 @@ from tests.unittest import TestCase
class CachedCallTestCase(TestCase):
- def test_get(self):
+ def test_get(self) -> None:
"""
Happy-path test case: makes a couple of calls and makes sure they behave
correctly
"""
- d = Deferred()
+ d: "Deferred[int]" = Deferred()
- async def f():
+ async def f() -> int:
return await d
slow_call = Mock(side_effect=f)
@@ -43,7 +44,7 @@ class CachedCallTestCase(TestCase):
# now fire off a couple of calls
completed_results = []
- async def r():
+ async def r() -> None:
res = await cached_call.get()
completed_results.append(res)
@@ -69,12 +70,12 @@ class CachedCallTestCase(TestCase):
self.assertEqual(r3, 123)
slow_call.assert_not_called()
- def test_fast_call(self):
+ def test_fast_call(self) -> None:
"""
Test the behaviour when the underlying function completes immediately
"""
- async def f():
+ async def f() -> int:
return 12
fast_call = Mock(side_effect=f)
@@ -92,12 +93,12 @@ class CachedCallTestCase(TestCase):
class RetryOnExceptionCachedCallTestCase(TestCase):
- def test_get(self):
+ def test_get(self) -> None:
# set up the RetryOnExceptionCachedCall around a function which will fail
# (after a while)
- d = Deferred()
+ d: "Deferred[int]" = Deferred()
- async def f1():
+ async def f1() -> NoReturn:
await d
raise ValueError("moo")
@@ -110,7 +111,7 @@ class RetryOnExceptionCachedCallTestCase(TestCase):
# now fire off a couple of calls
completed_results = []
- async def r():
+ async def r() -> None:
try:
await cached_call.get()
except Exception as e1:
@@ -137,7 +138,7 @@ class RetryOnExceptionCachedCallTestCase(TestCase):
# to the getter
d = Deferred()
- async def f2():
+ async def f2() -> int:
return await d
slow_call.reset_mock()
diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py
index 02b99b466a..f74d82b1dc 100644
--- a/tests/util/caches/test_deferred_cache.py
+++ b/tests/util/caches/test_deferred_cache.py
@@ -13,6 +13,7 @@
# limitations under the License.
from functools import partial
+from typing import List, Tuple
from twisted.internet import defer
@@ -22,20 +23,20 @@ from tests.unittest import TestCase
class DeferredCacheTestCase(TestCase):
- def test_empty(self):
- cache = DeferredCache("test")
+ def test_empty(self) -> None:
+ cache: DeferredCache[str, int] = DeferredCache("test")
with self.assertRaises(KeyError):
cache.get("foo")
- def test_hit(self):
- cache = DeferredCache("test")
+ def test_hit(self) -> None:
+ cache: DeferredCache[str, int] = DeferredCache("test")
cache.prefill("foo", 123)
self.assertEqual(self.successResultOf(cache.get("foo")), 123)
- def test_hit_deferred(self):
- cache = DeferredCache("test")
- origin_d = defer.Deferred()
+ def test_hit_deferred(self) -> None:
+ cache: DeferredCache[str, int] = DeferredCache("test")
+ origin_d: "defer.Deferred[int]" = defer.Deferred()
set_d = cache.set("k1", origin_d)
# get should return an incomplete deferred
@@ -43,7 +44,7 @@ class DeferredCacheTestCase(TestCase):
self.assertFalse(get_d.called)
# add a callback that will make sure that the set_d gets called before the get_d
- def check1(r):
+ def check1(r: str) -> str:
self.assertTrue(set_d.called)
return r
@@ -55,16 +56,16 @@ class DeferredCacheTestCase(TestCase):
self.assertEqual(self.successResultOf(set_d), 99)
self.assertEqual(self.successResultOf(get_d), 99)
- def test_callbacks(self):
+ def test_callbacks(self) -> None:
"""Invalidation callbacks are called at the right time"""
- cache = DeferredCache("test")
+ cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callback
cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
# now replace that entry with a pending result
- origin_d = defer.Deferred()
+ origin_d: "defer.Deferred[int]" = defer.Deferred()
set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set"))
# ... and also make a get request
@@ -89,15 +90,15 @@ class DeferredCacheTestCase(TestCase):
cache.prefill("k1", 30)
self.assertEqual(callbacks, {"set", "get"})
- def test_set_fail(self):
- cache = DeferredCache("test")
+ def test_set_fail(self) -> None:
+ cache: DeferredCache[str, int] = DeferredCache("test")
callbacks = set()
# start with an entry, with a callback
cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
# now replace that entry with a pending result
- origin_d = defer.Deferred()
+ origin_d: defer.Deferred = defer.Deferred()
set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set"))
# ... and also make a get request
@@ -126,9 +127,9 @@ class DeferredCacheTestCase(TestCase):
cache.prefill("k1", 30)
self.assertEqual(callbacks, {"prefill", "get2"})
- def test_get_immediate(self):
- cache = DeferredCache("test")
- d1 = defer.Deferred()
+ def test_get_immediate(self) -> None:
+ cache: DeferredCache[str, int] = DeferredCache("test")
+ d1: "defer.Deferred[int]" = defer.Deferred()
cache.set("key1", d1)
# get_immediate should return default
@@ -142,27 +143,27 @@ class DeferredCacheTestCase(TestCase):
v = cache.get_immediate("key1", 1)
self.assertEqual(v, 2)
- def test_invalidate(self):
- cache = DeferredCache("test")
+ def test_invalidate(self) -> None:
+ cache: DeferredCache[Tuple[str], int] = DeferredCache("test")
cache.prefill(("foo",), 123)
cache.invalidate(("foo",))
with self.assertRaises(KeyError):
cache.get(("foo",))
- def test_invalidate_all(self):
- cache = DeferredCache("testcache")
+ def test_invalidate_all(self) -> None:
+ cache: DeferredCache[str, str] = DeferredCache("testcache")
callback_record = [False, False]
- def record_callback(idx):
+ def record_callback(idx: int) -> None:
callback_record[idx] = True
# add a couple of pending entries
- d1 = defer.Deferred()
+ d1: "defer.Deferred[str]" = defer.Deferred()
cache.set("key1", d1, partial(record_callback, 0))
- d2 = defer.Deferred()
+ d2: "defer.Deferred[str]" = defer.Deferred()
cache.set("key2", d2, partial(record_callback, 1))
# lookup should return pending deferreds
@@ -193,8 +194,8 @@ class DeferredCacheTestCase(TestCase):
with self.assertRaises(KeyError):
cache.get("key1", None)
- def test_eviction(self):
- cache = DeferredCache(
+ def test_eviction(self) -> None:
+ cache: DeferredCache[int, str] = DeferredCache(
"test", max_entries=2, apply_cache_factor_from_config=False
)
@@ -208,8 +209,8 @@ class DeferredCacheTestCase(TestCase):
cache.get(2)
cache.get(3)
- def test_eviction_lru(self):
- cache = DeferredCache(
+ def test_eviction_lru(self) -> None:
+ cache: DeferredCache[int, str] = DeferredCache(
"test", max_entries=2, apply_cache_factor_from_config=False
)
@@ -227,8 +228,8 @@ class DeferredCacheTestCase(TestCase):
cache.get(1)
cache.get(3)
- def test_eviction_iterable(self):
- cache = DeferredCache(
+ def test_eviction_iterable(self) -> None:
+ cache: DeferredCache[int, List[str]] = DeferredCache(
"test",
max_entries=3,
apply_cache_factor_from_config=False,
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 48e616ac74..13f1edd533 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -13,11 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import Set
+from typing import Iterable, Set, Tuple, cast
from unittest import mock
from twisted.internet import defer, reactor
from twisted.internet.defer import CancelledError, Deferred
+from twisted.internet.interfaces import IReactorTime
from synapse.api.errors import SynapseError
from synapse.logging.context import (
@@ -28,7 +29,7 @@ from synapse.logging.context import (
make_deferred_yieldable,
)
from synapse.util.caches import descriptors
-from synapse.util.caches.descriptors import cached, cachedList, lru_cache
+from synapse.util.caches.descriptors import cached, cachedList
from tests import unittest
from tests.test_utils import get_awaitable_result
@@ -36,41 +37,9 @@ from tests.test_utils import get_awaitable_result
logger = logging.getLogger(__name__)
-class LruCacheDecoratorTestCase(unittest.TestCase):
- def test_base(self):
- class Cls:
- def __init__(self):
- self.mock = mock.Mock()
-
- @lru_cache()
- def fn(self, arg1, arg2):
- return self.mock(arg1, arg2)
-
- obj = Cls()
- obj.mock.return_value = "fish"
- r = obj.fn(1, 2)
- self.assertEqual(r, "fish")
- obj.mock.assert_called_once_with(1, 2)
- obj.mock.reset_mock()
-
- # a call with different params should call the mock again
- obj.mock.return_value = "chips"
- r = obj.fn(1, 3)
- self.assertEqual(r, "chips")
- obj.mock.assert_called_once_with(1, 3)
- obj.mock.reset_mock()
-
- # the two values should now be cached
- r = obj.fn(1, 2)
- self.assertEqual(r, "fish")
- r = obj.fn(1, 3)
- self.assertEqual(r, "chips")
- obj.mock.assert_not_called()
-
-
def run_on_reactor():
- d = defer.Deferred()
- reactor.callLater(0, d.callback, 0)
+ d: "Deferred[int]" = defer.Deferred()
+ cast(IReactorTime, reactor).callLater(0, d.callback, 0)
return make_deferred_yieldable(d)
@@ -256,7 +225,8 @@ class DescriptorTestCase(unittest.TestCase):
callbacks: Set[str] = set()
# set off an asynchronous request
- obj.result = origin_d = defer.Deferred()
+ origin_d: Deferred = defer.Deferred()
+ obj.result = origin_d
d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1"))
self.assertFalse(d1.called)
@@ -294,7 +264,7 @@ class DescriptorTestCase(unittest.TestCase):
"""Check that logcontexts are set and restored correctly when
using the cache."""
- complete_lookup = defer.Deferred()
+ complete_lookup: Deferred = defer.Deferred()
class Cls:
@descriptors.cached()
@@ -478,10 +448,10 @@ class DescriptorTestCase(unittest.TestCase):
@cached(cache_context=True)
async def func2(self, key, cache_context):
- return self.func3(key, on_invalidate=cache_context.invalidate)
+ return await self.func3(key, on_invalidate=cache_context.invalidate)
- @lru_cache(cache_context=True)
- def func3(self, key, cache_context):
+ @cached(cache_context=True)
+ async def func3(self, key, cache_context):
self.invalidate = cache_context.invalidate
return 42
@@ -804,10 +774,14 @@ class CachedListDescriptorTestCase(unittest.TestCase):
@descriptors.cachedList(cached_method_name="fn", list_name="args1")
async def list_fn(self, args1, arg2):
- assert current_context().name == "c1"
+ context = current_context()
+ assert isinstance(context, LoggingContext)
+ assert context.name == "c1"
# we want this to behave like an asynchronous function
await run_on_reactor()
- assert current_context().name == "c1"
+ context = current_context()
+ assert isinstance(context, LoggingContext)
+ assert context.name == "c1"
return self.mock(args1, arg2)
with LoggingContext("c1") as c1:
@@ -866,7 +840,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
return self.mock(args1)
obj = Cls()
- deferred_result = Deferred()
+ deferred_result: "Deferred[dict]" = Deferred()
obj.mock.return_value = deferred_result
# start off several concurrent lookups of the same key
@@ -1008,3 +982,34 @@ class CachedListDescriptorTestCase(unittest.TestCase):
obj.inner_context_was_finished, "Tried to restart a finished logcontext"
)
self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
+ def test_num_args_mismatch(self):
+ """
+ Make sure someone does not accidentally use @cachedList on a method with
+ a mismatch in the number args to the underlying single cache method.
+ """
+
+ class Cls:
+ @descriptors.cached(tree=True)
+ def fn(self, room_id, event_id):
+ pass
+
+ # This is wrong ❌. `@cachedList` expects to be given the same number
+ # of arguments as the underlying cached function, just with one of
+ # the arguments being an iterable
+ @descriptors.cachedList(cached_method_name="fn", list_name="keys")
+ def list_fn(self, keys: Iterable[Tuple[str, str]]):
+ pass
+
+ # Corrected syntax ✅
+ #
+ # @cachedList(cached_method_name="fn", list_name="event_ids")
+ # async def list_fn(
+ # self, room_id: str, event_ids: Collection[str],
+ # )
+
+ obj = Cls()
+
+ # Make sure this raises an error about the arg mismatch
+ with self.assertRaises(TypeError):
+ obj.list_fn([("foo", "bar")])
diff --git a/tests/util/caches/test_response_cache.py b/tests/util/caches/test_response_cache.py
index 025b73e32f..f09eeecada 100644
--- a/tests/util/caches/test_response_cache.py
+++ b/tests/util/caches/test_response_cache.py
@@ -35,7 +35,7 @@ class ResponseCacheTestCase(TestCase):
(These have cache with a short timeout_ms=, shorter than will be tested through advancing the clock)
"""
- def setUp(self):
+ def setUp(self) -> None:
self.reactor, self.clock = get_clock()
def with_cache(self, name: str, ms: int = 0) -> ResponseCache:
@@ -49,7 +49,7 @@ class ResponseCacheTestCase(TestCase):
await self.clock.sleep(1)
return o
- def test_cache_hit(self):
+ def test_cache_hit(self) -> None:
cache = self.with_cache("keeping_cache", ms=9001)
expected_result = "howdy"
@@ -74,7 +74,7 @@ class ResponseCacheTestCase(TestCase):
"cache should still have the result",
)
- def test_cache_miss(self):
+ def test_cache_miss(self) -> None:
cache = self.with_cache("trashing_cache", ms=0)
expected_result = "howdy"
@@ -90,7 +90,7 @@ class ResponseCacheTestCase(TestCase):
)
self.assertCountEqual([], cache.keys(), "cache should not have the result now")
- def test_cache_expire(self):
+ def test_cache_expire(self) -> None:
cache = self.with_cache("short_cache", ms=1000)
expected_result = "howdy"
@@ -115,7 +115,7 @@ class ResponseCacheTestCase(TestCase):
self.reactor.pump((2,))
self.assertCountEqual([], cache.keys(), "cache should not have the result now")
- def test_cache_wait_hit(self):
+ def test_cache_wait_hit(self) -> None:
cache = self.with_cache("neutral_cache")
expected_result = "howdy"
@@ -131,7 +131,7 @@ class ResponseCacheTestCase(TestCase):
self.assertEqual(expected_result, self.successResultOf(wrap_d))
- def test_cache_wait_expire(self):
+ def test_cache_wait_expire(self) -> None:
cache = self.with_cache("medium_cache", ms=3000)
expected_result = "howdy"
@@ -162,7 +162,7 @@ class ResponseCacheTestCase(TestCase):
self.assertCountEqual([], cache.keys(), "cache should not have the result now")
@parameterized.expand([(True,), (False,)])
- def test_cache_context_nocache(self, should_cache: bool):
+ def test_cache_context_nocache(self, should_cache: bool) -> None:
"""If the callback clears the should_cache bit, the result should not be cached"""
cache = self.with_cache("medium_cache", ms=3000)
@@ -170,7 +170,7 @@ class ResponseCacheTestCase(TestCase):
call_count = 0
- async def non_caching(o: str, cache_context: ResponseCacheContext[int]):
+ async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str:
nonlocal call_count
call_count += 1
await self.clock.sleep(1)
diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py
index fe8314057d..679d1eb36b 100644
--- a/tests/util/caches/test_ttlcache.py
+++ b/tests/util/caches/test_ttlcache.py
@@ -20,11 +20,11 @@ from tests import unittest
class CacheTestCase(unittest.TestCase):
- def setUp(self):
+ def setUp(self) -> None:
self.mock_timer = Mock(side_effect=lambda: 100.0)
- self.cache = TTLCache("test_cache", self.mock_timer)
+ self.cache: TTLCache[str, str] = TTLCache("test_cache", self.mock_timer)
- def test_get(self):
+ def test_get(self) -> None:
"""simple set/get tests"""
self.cache.set("one", "1", 10)
self.cache.set("two", "2", 20)
@@ -59,7 +59,7 @@ class CacheTestCase(unittest.TestCase):
self.assertEqual(self.cache._metrics.hits, 4)
self.assertEqual(self.cache._metrics.misses, 5)
- def test_expiry(self):
+ def test_expiry(self) -> None:
self.cache.set("one", "1", 10)
self.cache.set("two", "2", 20)
self.cache.set("three", "3", 30)
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 5d1aa025d1..6913de24b9 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -40,7 +40,10 @@ class TestDependencyChecker(TestCase):
def mock_installed_package(
self, distribution: Optional[DummyDistribution]
) -> Generator[None, None, None]:
- """Pretend that looking up any distribution yields the given `distribution`."""
+ """Pretend that looking up any package yields the given `distribution`.
+
+ If `distribution = None`, we pretend that the package is not installed.
+ """
def mock_distribution(name: str):
if distribution is None:
@@ -81,7 +84,7 @@ class TestDependencyChecker(TestCase):
self.assertRaises(DependencyException, check_requirements)
def test_checks_ignore_dev_dependencies(self) -> None:
- """Bot generic and per-extra checks should ignore dev dependencies."""
+ """Both generic and per-extra checks should ignore dev dependencies."""
with patch(
"synapse.util.check_dependencies.metadata.requires",
return_value=["dummypkg >= 1; extra == 'mypy'"],
@@ -142,3 +145,16 @@ class TestDependencyChecker(TestCase):
with self.mock_installed_package(new_release_candidate):
# should not raise
check_requirements()
+
+ def test_setuptools_rust_ignored(self) -> None:
+ """Test a workaround for a `poetry build` problem. Reproduces #13926."""
+ with patch(
+ "synapse.util.check_dependencies.metadata.requires",
+ return_value=["setuptools_rust >= 1.3"],
+ ):
+ with self.mock_installed_package(None):
+ # should not raise, even if setuptools_rust is not installed
+ check_requirements()
+ with self.mock_installed_package(old):
+ # We also ignore old versions of setuptools_rust
+ check_requirements()
diff --git a/tests/util/test_macaroons.py b/tests/util/test_macaroons.py
index 32125f7bb7..40754a4711 100644
--- a/tests/util/test_macaroons.py
+++ b/tests/util/test_macaroons.py
@@ -84,34 +84,6 @@ class MacaroonGeneratorTestCase(TestCase):
)
self.assertEqual(user_id, "@user:tesths")
- def test_short_term_login_token(self):
- """Test the generation and verification of short-term login tokens"""
- token = self.macaroon_generator.generate_short_term_login_token(
- user_id="@user:tesths",
- auth_provider_id="oidc",
- auth_provider_session_id="sid",
- duration_in_ms=2 * 60 * 1000,
- )
-
- info = self.macaroon_generator.verify_short_term_login_token(token)
- self.assertEqual(info.user_id, "@user:tesths")
- self.assertEqual(info.auth_provider_id, "oidc")
- self.assertEqual(info.auth_provider_session_id, "sid")
-
- # Raises with another secret key
- with self.assertRaises(MacaroonVerificationFailedException):
- self.other_macaroon_generator.verify_short_term_login_token(token)
-
- # Wait a minute
- self.reactor.pump([60])
- # Shouldn't raise
- self.macaroon_generator.verify_short_term_login_token(token)
- # Wait another minute
- self.reactor.pump([60])
- # Should raise since it expired
- with self.assertRaises(MacaroonVerificationFailedException):
- self.macaroon_generator.verify_short_term_login_token(token)
-
def test_oidc_session_token(self):
"""Test the generation and verification of OIDC session cookies"""
state = "arandomstate"
diff --git a/tests/utils.py b/tests/utils.py
index d2c6d1e852..045a8b5fa7 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -135,7 +135,6 @@ def default_config(
"enable_registration_captcha": False,
"macaroon_secret_key": "not even a little secret",
"password_providers": [],
- "worker_replication_url": "",
"worker_app": None,
"block_non_admin_invites": False,
"federation_domain_whitelist": None,
@@ -271,9 +270,7 @@ class MockClock:
*args: P.args,
**kwargs: P.kwargs,
) -> None:
- # This type-ignore should be redundant once we use a mypy release with
- # https://github.com/python/mypy/pull/12668.
- self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs)) # type: ignore[arg-type]
+ self.loopers.append(Looper(function, interval / 1000.0, self.now, args, kwargs))
def cancel_call_later(self, timer: Timer, ignore_errs: bool = False) -> None:
if timer.expired:
|