diff --git a/.ci/latest_deps_build_failed_issue_template.md b/.ci/latest_deps_build_failed_issue_template.md
new file mode 100644
index 0000000000..0525402503
--- /dev/null
+++ b/.ci/latest_deps_build_failed_issue_template.md
@@ -0,0 +1,4 @@
+---
+title: CI run against latest deps is failing
+---
+See https://github.com/{{env.GITHUB_REPOSITORY}}/actions/runs/{{env.GITHUB_RUN_ID}}
diff --git a/.ci/patch_for_twisted_trunk.sh b/.ci/patch_for_twisted_trunk.sh
deleted file mode 100755
index f524581986..0000000000
--- a/.ci/patch_for_twisted_trunk.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-# replaces the dependency on Twisted in `python_dependencies` with trunk.
-
-set -e
-cd "$(dirname "$0")"/..
-
-sed -i -e 's#"Twisted.*"#"Twisted @ git+https://github.com/twisted/twisted"#' synapse/python_dependencies.py
diff --git a/.dockerignore b/.dockerignore
index a236760cf1..7809863ef3 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -8,8 +8,4 @@
!pyproject.toml
!poetry.lock
-# TODO: remove these once we have moved over to using poetry-core in pyproject.toml
-!MANIFEST.in
-!setup.py
-
**/__pycache__
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
new file mode 100644
index 0000000000..1a61d179d9
--- /dev/null
+++ b/.github/workflows/latest_deps.yml
@@ -0,0 +1,156 @@
+# People who are freshly `pip install`ing from PyPI will pull in the latest versions of
+# dependencies which match the broad requirements. Since most CI runs are against
+# the locked poetry environment, run specifically against the latest dependencies to
+# know if there's an upcoming breaking change.
+#
+# As an overview this workflow:
+# - checks out develop,
+# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
+# - runs mypy and test suites in that checkout.
+#
+# Based on the twisted trunk CI job.
+
+name: Latest dependencies
+
+on:
+ schedule:
+ - cron: 0 7 * * *
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ mypy:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ # The dev dependencies aren't exposed in the wheel metadata (at least with current
+ # poetry-core versions), so we install with poetry.
+ - uses: matrix-org/setup-python-poetry@v1
+ with:
+ python-version: "3.x"
+ poetry-version: "1.2.0b1"
+ # Dump installed versions for debugging.
+ - run: poetry run pip list > before.txt
+ # Upgrade all runtime dependencies only. This is intended to mimic a fresh
+ # `pip install matrix-synapse[all]` as closely as possible.
+ - run: poetry update --no-dev
+ - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true)
+ - run: poetry run mypy
+ trial:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - database: "sqlite"
+ - database: "postgres"
+ postgres-version: "14"
+
+ steps:
+ - uses: actions/checkout@v2
+ - run: sudo apt-get -qq install xmlsec1
+ - name: Set up PostgreSQL ${{ matrix.postgres-version }}
+ if: ${{ matrix.postgres-version }}
+ run: |
+ docker run -d -p 5432:5432 \
+ -e POSTGRES_PASSWORD=postgres \
+ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
+ postgres:${{ matrix.postgres-version }}
+ - uses: actions/setup-python@v2
+ with:
+ python-version: "3.x"
+ - run: pip install .[all,test]
+ - name: Await PostgreSQL
+ if: ${{ matrix.postgres-version }}
+ timeout-minutes: 2
+ run: until pg_isready -h localhost; do sleep 1; done
+ - run: python -m twisted.trial --jobs=2 tests
+ env:
+ SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
+ SYNAPSE_POSTGRES_HOST: localhost
+ SYNAPSE_POSTGRES_USER: postgres
+ SYNAPSE_POSTGRES_PASSWORD: postgres
+ - name: Dump logs
+        # Logs are most useful when the command fails, so always include them.
+        if: ${{ always() }}
+        # Note: this dumps to the workflow logs instead of using actions/upload-artifact,
+        # which keeps logs colocated with failing jobs. It also ignores find's
+        # exit code; this is a best-effort affair.
+ run: >-
+ find _trial_temp -name '*.log'
+ -exec echo "::group::{}" \;
+ -exec cat {} \;
+ -exec echo "::endgroup::" \;
+ || true
+
+
+ sytest:
+ runs-on: ubuntu-latest
+ container:
+ image: matrixdotorg/sytest-synapse:testing
+ volumes:
+ - ${{ github.workspace }}:/src
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - sytest-tag: focal
+
+ - sytest-tag: focal
+ postgres: postgres
+ workers: workers
+ redis: redis
+ env:
+      POSTGRES: ${{ matrix.postgres && 1 }}
+ WORKERS: ${{ matrix.workers && 1 }}
+ REDIS: ${{ matrix.redis && 1 }}
+ BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Ensure sytest runs `pip install`
+ # Delete the lockfile so sytest will `pip install` rather than `poetry install`
+ run: rm /src/poetry.lock
+ working-directory: /src
+ - name: Prepare test blacklist
+ run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
+ - name: Run SyTest
+ run: /bootstrap.sh synapse
+ working-directory: /src
+ - name: Summarise results.tap
+ if: ${{ always() }}
+ run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
+ - name: Upload SyTest logs
+ uses: actions/upload-artifact@v2
+ if: ${{ always() }}
+ with:
+ name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
+ path: |
+ /logs/results.tap
+ /logs/**/*.log*
+
+
+ # TODO: run complement (as with twisted trunk, see #12473).
+
+  # Open an issue if the build fails, so we know about it.
+ open-issue:
+ if: failure()
+ needs:
+ # TODO: should mypy be included here? It feels more brittle than the other two.
+ - mypy
+ - trial
+ - sytest
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ update_existing: true
+ filename: .ci/latest_deps_build_failed_issue_template.md
+
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 5a98f61932..cad4cb6d77 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -15,24 +15,18 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- - run: pip install -e .
+ - run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
lint:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- toxenv:
- - "check_codestyle"
- - "check_isort"
- - "mypy"
-
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
- - run: pip install tox
- - run: tox -e ${{ matrix.toxenv }}
+ # This does a vanilla `poetry install` - no extras. I'm slightly anxious
+ # that we might skip some typechecks on code that uses extras. However,
+ # I think the right way to fix this is to mark any extras needed for
+ # typechecking as development dependencies. To detect this, we ought to
+    # turn up mypy's strictness: disallow unknown imports and accept fewer
+ # uses of `Any`.
+ uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
lint-crlf:
runs-on: ubuntu-latest
@@ -71,23 +65,23 @@ jobs:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
- toxenv: ["py"]
+ extras: ["all"]
include:
# Newest Python without optional deps
- python-version: "3.10"
- toxenv: "py-noextras"
+ extras: ""
# Oldest Python with PostgreSQL
- python-version: "3.7"
database: "postgres"
postgres-version: "10"
- toxenv: "py"
+ extras: "all"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
- toxenv: "py"
+ extras: "all"
steps:
- uses: actions/checkout@v2
@@ -99,17 +93,16 @@ jobs:
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
- - uses: actions/setup-python@v2
+ - uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
- - run: pip install tox
+ extras: ${{ matrix.extras }}
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- - run: tox -e ${{ matrix.toxenv }}
+ - run: poetry run trial --jobs=2 tests
env:
- TRIAL_FLAGS: "--jobs=2"
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
@@ -156,23 +149,24 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
+ # Note: sqlite only; no postgres. Completely untested since poetry move.
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.7"]
+ extras: ["all"]
steps:
- uses: actions/checkout@v2
+ # Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- - uses: actions/setup-python@v2
+ - uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
- - run: pip install tox
- - run: tox -e py
- env:
- TRIAL_FLAGS: "--jobs=2"
+ extras: ${{ matrix.extras }}
+ - run: poetry run trial --jobs=2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index fb9d46b7bf..8fc1affb77 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -6,16 +6,25 @@ on:
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- - uses: actions/setup-python@v2
- - run: .ci/patch_for_twisted_trunk.sh
- - run: pip install tox
- - run: tox -e mypy
+ - uses: matrix-org/setup-python-poetry@v1
+ with:
+ python-version: "3.x"
+ extras: "all"
+ - run: |
+ poetry remove twisted
+ poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+ poetry install --no-interaction --extras "all test"
+ - run: poetry run mypy
trial:
runs-on: ubuntu-latest
@@ -23,14 +32,15 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- - uses: actions/setup-python@v2
+ - uses: matrix-org/setup-python-poetry@v1
with:
- python-version: 3.7
- - run: .ci/patch_for_twisted_trunk.sh
- - run: pip install tox
- - run: tox -e py
- env:
- TRIAL_FLAGS: "--jobs=2"
+ python-version: "3.x"
+ extras: "all test"
+ - run: |
+ poetry remove twisted
+ poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+ poetry install --no-interaction --extras "all test"
+ - run: poetry run trial --jobs 2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
@@ -55,11 +65,23 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Patch dependencies
- run: .ci/patch_for_twisted_trunk.sh
+ # Note: The poetry commands want to create a virtualenv in /src/.venv/,
+ # but the sytest-synapse container expects it to be in /venv/.
+ # We symlink it before running poetry so that poetry actually
+ # ends up installing to `/venv`.
+ run: |
+ ln -s -T /venv /src/.venv
+ poetry remove twisted
+ poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+ poetry install --no-interaction --extras "all test"
working-directory: /src
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
+ env:
+ # Use offline mode to avoid reinstalling the pinned version of
+ # twisted.
+ OFFLINE: 1
- name: Summarise results.tap
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
diff --git a/.gitignore b/.gitignore
index c011cd27a4..e58affb241 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,8 +15,7 @@ _trial_temp*/
.DS_Store
__pycache__/
-# We do want the poetry lockfile. TODO: is there a good reason for ignoring
-# '*.lock' above? If not, let's nuke it.
+# We do want the poetry lockfile.
!poetry.lock
# stuff that is likely to exist when you run a server locally
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index d744c090ac..0000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,54 +0,0 @@
-include LICENSE
-include VERSION
-include *.rst
-include *.md
-include demo/README
-include demo/demo.tls.dh
-include demo/*.py
-include demo/*.sh
-
-include synapse/py.typed
-recursive-include synapse/storage *.sql
-recursive-include synapse/storage *.sql.postgres
-recursive-include synapse/storage *.sql.sqlite
-recursive-include synapse/storage *.py
-recursive-include synapse/storage *.txt
-recursive-include synapse/storage *.md
-
-recursive-include docs *
-recursive-include scripts-dev *
-recursive-include synapse *.pyi
-recursive-include tests *.py
-recursive-include tests *.pem
-recursive-include tests *.p8
-recursive-include tests *.crt
-recursive-include tests *.key
-
-recursive-include synapse/res *
-recursive-include synapse/static *.css
-recursive-include synapse/static *.gif
-recursive-include synapse/static *.html
-recursive-include synapse/static *.js
-
-exclude .codecov.yml
-exclude .coveragerc
-exclude .dockerignore
-exclude .editorconfig
-exclude Dockerfile
-exclude mypy.ini
-exclude sytest-blacklist
-exclude test_postgresql.sh
-
-include book.toml
-include pyproject.toml
-recursive-include changelog.d *
-
-include .flake8
-prune .circleci
-prune .github
-prune .ci
-prune contrib
-prune debian
-prune demo/etc
-prune docker
-prune stubs
diff --git a/README.rst b/README.rst
index 595fb5ff62..d71d733679 100644
--- a/README.rst
+++ b/README.rst
@@ -293,39 +293,42 @@ directory of your choice::
git clone https://github.com/matrix-org/synapse.git
cd synapse
-Synapse has a number of external dependencies, that are easiest
-to install using pip and a virtualenv::
+Synapse has a number of external dependencies. We maintain a fixed development
+environment using `poetry <https://python-poetry.org/>`_. First, install poetry. We
+recommend::
- python3 -m venv ./env
- source ./env/bin/activate
- pip install -e ".[all,dev]"
+ pip install --user pipx
+ pipx install poetry
-This will run a process of downloading and installing all the needed
-dependencies into a virtual env. If any dependencies fail to install,
-try installing the failing modules individually::
+as described `here <https://python-poetry.org/docs/#installing-with-pipx>`_.
+(See `poetry's installation docs <https://python-poetry.org/docs/#installation>`_
+for other installation methods.) Then ask poetry to create a virtual environment
+from the project and install Synapse's dependencies::
+
+ poetry install --extras "all test"
- pip install -e "module-name"
+This will run a process of downloading and installing all the needed
+dependencies into a virtual env.
We recommend using the demo which starts 3 federated instances running on ports `8080` - `8082`
- ./demo/start.sh
+ poetry run ./demo/start.sh
-(to stop, you can use `./demo/stop.sh`)
+(to stop, you can use `poetry run ./demo/stop.sh`)
-See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html)
+See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
for more information.
If you just want to start a single instance of the app and run it directly::
# Create the homeserver.yaml config once
- python -m synapse.app.homeserver \
+ poetry run synapse_homeserver \
--server-name my.domain.name \
--config-path homeserver.yaml \
--generate-config \
--report-stats=[yes|no]
# Start the app
- python -m synapse.app.homeserver --config-path homeserver.yaml
+ poetry run synapse_homeserver --config-path homeserver.yaml
Running the unit tests
@@ -334,7 +337,7 @@ Running the unit tests
After getting up and running, you may wish to run Synapse's unit tests to
check that everything is installed correctly::
- trial tests
+ poetry run trial tests
This should end with a 'PASSED' result (note that exact numbers will
differ)::
diff --git a/changelog.d/11398.feature b/changelog.d/11398.feature
new file mode 100644
index 0000000000..a910f4da14
--- /dev/null
+++ b/changelog.d/11398.feature
@@ -0,0 +1 @@
+Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly GmbH.
diff --git a/changelog.d/12213.bugfix b/changelog.d/12213.bugfix
new file mode 100644
index 0000000000..9278e3a9c1
--- /dev/null
+++ b/changelog.d/12213.bugfix
@@ -0,0 +1 @@
+Prevent a sync request from removing a user's busy presence status.
diff --git a/changelog.d/12319.bugfix b/changelog.d/12319.bugfix
new file mode 100644
index 0000000000..a50191feaa
--- /dev/null
+++ b/changelog.d/12319.bugfix
@@ -0,0 +1 @@
+Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper.
diff --git a/changelog.d/12337.feature b/changelog.d/12337.feature
new file mode 100644
index 0000000000..6c4444c707
--- /dev/null
+++ b/changelog.d/12337.feature
@@ -0,0 +1 @@
+Use poetry to manage Synapse's dependencies.
\ No newline at end of file
diff --git a/changelog.d/12340.doc b/changelog.d/12340.doc
new file mode 100644
index 0000000000..8354f2259e
--- /dev/null
+++ b/changelog.d/12340.doc
@@ -0,0 +1 @@
+Fix rendering of the documentation site when using the 'print' feature.
diff --git a/changelog.d/12344.removal b/changelog.d/12344.removal
new file mode 100644
index 0000000000..ecefa76d8e
--- /dev/null
+++ b/changelog.d/12344.removal
@@ -0,0 +1 @@
+The groups/communities feature in Synapse has been disabled by default.
diff --git a/changelog.d/12365.feature b/changelog.d/12365.feature
new file mode 100644
index 0000000000..642dea966c
--- /dev/null
+++ b/changelog.d/12365.feature
@@ -0,0 +1 @@
+Enable processing of device list updates asynchronously.
diff --git a/changelog.d/12368.doc b/changelog.d/12368.doc
new file mode 100644
index 0000000000..62e4cb2c7e
--- /dev/null
+++ b/changelog.d/12368.doc
@@ -0,0 +1 @@
+Add a manual documenting config file options.
\ No newline at end of file
diff --git a/changelog.d/12382.removal b/changelog.d/12382.removal
new file mode 100644
index 0000000000..eb91186340
--- /dev/null
+++ b/changelog.d/12382.removal
@@ -0,0 +1 @@
+Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440).
diff --git a/changelog.d/12394.misc b/changelog.d/12394.misc
new file mode 100644
index 0000000000..69109fcc37
--- /dev/null
+++ b/changelog.d/12394.misc
@@ -0,0 +1 @@
+Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join.
diff --git a/changelog.d/12395.misc b/changelog.d/12395.misc
new file mode 100644
index 0000000000..0a2123b294
--- /dev/null
+++ b/changelog.d/12395.misc
@@ -0,0 +1 @@
+Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083).
diff --git a/changelog.d/12425.misc b/changelog.d/12425.misc
new file mode 100644
index 0000000000..3b076be0bd
--- /dev/null
+++ b/changelog.d/12425.misc
@@ -0,0 +1 @@
+Run twisted trunk CI job in the locked poetry environment.
diff --git a/changelog.d/12427.feature b/changelog.d/12427.feature
new file mode 100644
index 0000000000..e6913c8c09
--- /dev/null
+++ b/changelog.d/12427.feature
@@ -0,0 +1 @@
+Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir.
diff --git a/changelog.d/12434.misc b/changelog.d/12434.misc
new file mode 100644
index 0000000000..88dab428d2
--- /dev/null
+++ b/changelog.d/12434.misc
@@ -0,0 +1 @@
+Run lints under poetry in CI, and remove corresponding tox lint jobs.
diff --git a/changelog.d/12438.misc b/changelog.d/12438.misc
new file mode 100644
index 0000000000..f2c07a56da
--- /dev/null
+++ b/changelog.d/12438.misc
@@ -0,0 +1 @@
+Run "main" trial tests under `poetry`.
diff --git a/changelog.d/12441.misc b/changelog.d/12441.misc
new file mode 100644
index 0000000000..c2619f1654
--- /dev/null
+++ b/changelog.d/12441.misc
@@ -0,0 +1 @@
+Bump twisted version in `poetry.lock` to work around [pip bug #9644](https://github.com/pypa/pip/issues/9644).
diff --git a/changelog.d/12445.misc b/changelog.d/12445.misc
new file mode 100644
index 0000000000..954248115a
--- /dev/null
+++ b/changelog.d/12445.misc
@@ -0,0 +1 @@
+Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current MSC iteration.
\ No newline at end of file
diff --git a/changelog.d/12449.misc b/changelog.d/12449.misc
new file mode 100644
index 0000000000..03e08aace4
--- /dev/null
+++ b/changelog.d/12449.misc
@@ -0,0 +1 @@
+Use `poetry` to manage the virtualenv in debian packages.
diff --git a/changelog.d/12450.misc b/changelog.d/12450.misc
new file mode 100644
index 0000000000..4b1c8cba87
--- /dev/null
+++ b/changelog.d/12450.misc
@@ -0,0 +1 @@
+Fix typo in the release script help string.
diff --git a/changelog.d/12451.doc b/changelog.d/12451.doc
new file mode 100644
index 0000000000..c8b23c1285
--- /dev/null
+++ b/changelog.d/12451.doc
@@ -0,0 +1 @@
+Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental.
diff --git a/changelog.d/12454.misc b/changelog.d/12454.misc
new file mode 100644
index 0000000000..cb7ff74b4c
--- /dev/null
+++ b/changelog.d/12454.misc
@@ -0,0 +1 @@
+Limit length of device_id to less than 512 characters.
diff --git a/changelog.d/12455.misc b/changelog.d/12455.misc
new file mode 100644
index 0000000000..9b19945673
--- /dev/null
+++ b/changelog.d/12455.misc
@@ -0,0 +1 @@
+Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development.
diff --git a/changelog.d/12457.doc b/changelog.d/12457.doc
new file mode 100644
index 0000000000..a4871622cf
--- /dev/null
+++ b/changelog.d/12457.doc
@@ -0,0 +1 @@
+Update worker documentation and replace old `federation_reader` with `generic_worker`.
\ No newline at end of file
diff --git a/changelog.d/12464.misc b/changelog.d/12464.misc
new file mode 100644
index 0000000000..7a8cc6ba51
--- /dev/null
+++ b/changelog.d/12464.misc
@@ -0,0 +1 @@
+Dockerfile-workers: reduce the amount we install in the image.
diff --git a/changelog.d/12465.feature b/changelog.d/12465.feature
new file mode 100644
index 0000000000..642dea966c
--- /dev/null
+++ b/changelog.d/12465.feature
@@ -0,0 +1 @@
+Enable processing of device list updates asynchronously.
diff --git a/changelog.d/12466.misc b/changelog.d/12466.misc
new file mode 100644
index 0000000000..b0c2c950fe
--- /dev/null
+++ b/changelog.d/12466.misc
@@ -0,0 +1 @@
+Dockerfile-workers: give the master its own log config.
diff --git a/changelog.d/12467.misc b/changelog.d/12467.misc
new file mode 100644
index 0000000000..fbf415f707
--- /dev/null
+++ b/changelog.d/12467.misc
@@ -0,0 +1 @@
+complement-synapse-workers: factor out separate entry point script.
diff --git a/changelog.d/12472.misc b/changelog.d/12472.misc
new file mode 100644
index 0000000000..ed306209cc
--- /dev/null
+++ b/changelog.d/12472.misc
@@ -0,0 +1 @@
+Add a CI job which tests Synapse against the latest version of all dependencies.
diff --git a/changelog.d/12474.misc b/changelog.d/12474.misc
new file mode 100644
index 0000000000..5292108b39
--- /dev/null
+++ b/changelog.d/12474.misc
@@ -0,0 +1 @@
+Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314).
diff --git a/changelog.d/12475.doc b/changelog.d/12475.doc
new file mode 100644
index 0000000000..f4481d0613
--- /dev/null
+++ b/changelog.d/12475.doc
@@ -0,0 +1 @@
+Strongly recommend `poetry` for development.
diff --git a/changelog.d/12476.bugfix b/changelog.d/12476.bugfix
new file mode 100644
index 0000000000..9ad6a71abd
--- /dev/null
+++ b/changelog.d/12476.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original.
diff --git a/changelog.d/12478.misc b/changelog.d/12478.misc
new file mode 100644
index 0000000000..061a604a1e
--- /dev/null
+++ b/changelog.d/12478.misc
@@ -0,0 +1 @@
+Use poetry-core instead of setuptools to build wheels.
diff --git a/changelog.d/12483.misc b/changelog.d/12483.misc
new file mode 100644
index 0000000000..88c6e3e465
--- /dev/null
+++ b/changelog.d/12483.misc
@@ -0,0 +1 @@
+Fix grammatical error in federation error response when the room version of a room is unknown.
diff --git a/changelog.d/12495.doc b/changelog.d/12495.doc
new file mode 100644
index 0000000000..afa0111675
--- /dev/null
+++ b/changelog.d/12495.doc
@@ -0,0 +1 @@
+Fix a broken link in `README.rst`.
diff --git a/changelog.d/12496.bugfix b/changelog.d/12496.bugfix
new file mode 100644
index 0000000000..a68df7c96a
--- /dev/null
+++ b/changelog.d/12496.bugfix
@@ -0,0 +1 @@
+Fix bug where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1 no matter how many extremities were deleted. Broke in v1.27.0.
diff --git a/changelog.d/12497.misc b/changelog.d/12497.misc
new file mode 100644
index 0000000000..17a661ec61
--- /dev/null
+++ b/changelog.d/12497.misc
@@ -0,0 +1 @@
+Fix a minor typo in the Debian changelogs generated by the release script.
diff --git a/changelog.d/12510.bugfix b/changelog.d/12510.bugfix
new file mode 100644
index 0000000000..d5856e982a
--- /dev/null
+++ b/changelog.d/12510.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where the image thumbnails embedded into email notifications were broken.
diff --git a/changelog.d/12511.misc b/changelog.d/12511.misc
new file mode 100644
index 0000000000..a314bedfc4
--- /dev/null
+++ b/changelog.d/12511.misc
@@ -0,0 +1 @@
+Remove unnecessary configuration overrides in tests.
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index e691163619..b068792592 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -30,9 +30,19 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in
;;
esac
-# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
-# than the 2/3 compatible `virtualenv`.
-
+# Manually install Poetry and export a pip-compatible `requirements.txt`
+# We need a Poetry pre-release as the export command is buggy in < 1.2
+TEMP_VENV="$(mktemp -d)"
+python3 -m venv "$TEMP_VENV"
+source "$TEMP_VENV/bin/activate"
+pip install -U pip
+pip install poetry==1.2.0b1
+poetry export --extras all --extras test -o exported_requirements.txt
+deactivate
+rm -rf "$TEMP_VENV"
+
+# Use --no-deps to only install pinned versions in exported_requirements.txt,
+# and to avoid https://github.com/pypa/pip/issues/9644
dh_virtualenv \
--install-suffix "matrix-synapse" \
--builtin-venv \
@@ -41,9 +51,11 @@ dh_virtualenv \
--preinstall="lxml" \
--preinstall="mock" \
--preinstall="wheel" \
+ --extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
- --extras="all,systemd,test"
+ --extras="all,systemd,test" \
+ --requirements="exported_requirements.txt"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
diff --git a/debian/changelog b/debian/changelog
index 2db6ed9491..05e6bd75a7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.58.0+nmu1) UNRELEASED; urgency=medium
+
+ * Use poetry to manage the bundled virtualenv included with this package.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 30 Mar 2022 12:21:43 +0100
+
matrix-synapse-py3 (1.57.1) stable; urgency=medium
* New synapse release 1.57.1.
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 0000000000..d488f298d5
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1 @@
+exported_requirements.txt
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6f87702cc8..4523c60645 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -59,7 +59,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
WORKDIR /synapse
# Copy just what we need to run `poetry export`...
-COPY pyproject.toml poetry.lock README.rst /synapse/
+COPY pyproject.toml poetry.lock /synapse/
RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt
@@ -98,9 +98,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
# ... and what we need to `pip install`.
-# TODO: once pyproject.toml declares poetry-core as its build system, we'll need to copy
-# pyproject.toml here, ditching setup.py and MANIFEST.in.
-COPY setup.py MANIFEST.in README.rst /synapse/
+COPY pyproject.toml README.rst /synapse/
# Install the synapse package itself.
RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 6fb1cdbfb0..9ccb2b22a7 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -2,10 +2,19 @@
FROM matrixdotorg/synapse
# Install deps
-RUN apt-get update
-RUN apt-get install -y supervisor redis nginx
+RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+ redis-server nginx-light
-# Remove the default nginx sites
+# Install supervisord with pip instead of apt, to avoid installing a second
+# copy of python.
+RUN --mount=type=cache,target=/root/.cache/pip \
+ pip install supervisor~=4.2
+
+# Disable the default nginx sites
RUN rm /etc/nginx/sites-enabled/default
# Copy Synapse worker, nginx and supervisord configuration template files
@@ -19,5 +28,7 @@ EXPOSE 8080/tcp
COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py
ENTRYPOINT ["/configure_workers_and_start.py"]
+# Replace the healthcheck with one which checks *all* the workers. The script
+# is generated by configure_workers_and_start.py.
HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile
index 982219a91e..65df2d114d 100644
--- a/docker/complement/SynapseWorkers.Dockerfile
+++ b/docker/complement/SynapseWorkers.Dockerfile
@@ -13,8 +13,8 @@ RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/cadd
tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root
# Install postgresql
-RUN apt-get update
-RUN apt-get install -y postgresql
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
@@ -34,40 +34,14 @@ WORKDIR /data
# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json
+# Copy the entrypoint
+COPY conf-workers/start-complement-synapse-workers.sh /
+
# Expose caddy's listener ports
EXPOSE 8008 8448
-ENTRYPOINT \
- # Replace the server name in the caddy config
- sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json && \
- # Start postgres
- pg_ctlcluster 13 main start 2>&1 && \
- # Start caddy
- /root/caddy start --config /root/caddy.json 2>&1 && \
- # Set the server name of the homeserver
- SYNAPSE_SERVER_NAME=${SERVER_NAME} \
- # No need to report stats here
- SYNAPSE_REPORT_STATS=no \
- # Set postgres authentication details which will be placed in the homeserver config file
- POSTGRES_PASSWORD=somesecret POSTGRES_USER=postgres POSTGRES_HOST=localhost \
- # Specify the workers to test with
- SYNAPSE_WORKER_TYPES="\
- event_persister, \
- event_persister, \
- background_worker, \
- frontend_proxy, \
- event_creator, \
- user_dir, \
- media_repository, \
- federation_inbound, \
- federation_reader, \
- federation_sender, \
- synchrotron, \
- appservice, \
- pusher" \
- # Run the script that writes the necessary config files and starts supervisord, which in turn
- # starts everything else
- /configure_workers_and_start.py
+ENTRYPOINT /start-complement-synapse-workers.sh
+# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
CMD /bin/sh /healthcheck.sh
diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh
new file mode 100755
index 0000000000..2c1e05bd62
--- /dev/null
+++ b/docker/complement/conf-workers/start-complement-synapse-workers.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement
+
+set -e
+
+function log {
+ d=$(date +"%Y-%m-%d %H:%M:%S,%3N")
+ echo "$d $@"
+}
+
+# Replace the server name in the caddy config
+sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json
+
+log "starting postgres"
+pg_ctlcluster 13 main start
+
+log "starting caddy"
+/root/caddy start --config /root/caddy.json
+
+# Set the server name of the homeserver
+export SYNAPSE_SERVER_NAME=${SERVER_NAME}
+
+# No need to report stats here
+export SYNAPSE_REPORT_STATS=no
+
+# Set postgres authentication details which will be placed in the homeserver config file
+export POSTGRES_PASSWORD=somesecret
+export POSTGRES_USER=postgres
+export POSTGRES_HOST=localhost
+
+# Specify the workers to test with
+export SYNAPSE_WORKER_TYPES="\
+ event_persister, \
+ event_persister, \
+ background_worker, \
+ frontend_proxy, \
+ event_creator, \
+ user_dir, \
+ media_repository, \
+ federation_inbound, \
+ federation_reader, \
+ federation_sender, \
+ synchrotron, \
+ appservice, \
+ pusher"
+
+# Run the script that writes the necessary config files and starts supervisord, which in turn
+# starts everything else
+exec /configure_workers_and_start.py
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index 0de2c6143b..408ef72787 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -5,6 +5,9 @@
nodaemon=true
user=root
+[include]
+files = /etc/supervisor/conf.d/*.conf
+
[program:nginx]
command=/usr/sbin/nginx -g "daemon off;"
priority=500
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 3e91024e8c..23cac18e8d 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -29,7 +29,7 @@
import os
import subprocess
import sys
-from typing import Any, Dict, Set
+from typing import Any, Dict, Mapping, Set
import jinja2
import yaml
@@ -341,7 +341,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# base shared worker jinja2 template.
#
# This config file will be passed to all workers, included Synapse's main process.
- shared_config = {"listeners": listeners}
+ shared_config: Dict[str, Any] = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
@@ -446,21 +446,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Write out the worker's logging config file
- # Check whether we should write worker logs to disk, in addition to the console
- extra_log_template_args = {}
- if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
- extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
- dir=data_dir, name=worker_name
- )
-
- # Render and write the file
- log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
- convert(
- "/conf/log.config",
- log_config_filepath,
- worker_name=worker_name,
- **extra_log_template_args,
- )
+ log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Then a worker config file
convert(
@@ -496,6 +482,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# Finally, we'll write out the config files.
+ # log config for the master process
+ master_log_config = generate_worker_log_config(environ, "master", data_dir)
+ shared_config["log_config"] = master_log_config
+
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
@@ -512,9 +502,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
)
# Supervisord config
+ os.makedirs("/etc/supervisor", exist_ok=True)
convert(
"/conf/supervisord.conf.j2",
- "/etc/supervisor/conf.d/supervisord.conf",
+ "/etc/supervisor/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
@@ -532,12 +523,28 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
os.mkdir(log_dir)
-def start_supervisord():
- """Starts up supervisord which then starts and monitors all other necessary processes
+def generate_worker_log_config(
+ environ: Mapping[str, str], worker_name: str, data_dir: str
+) -> str:
+ """Generate a log.config file for the given worker.
- Raises: CalledProcessError if calling start.py return a non-zero exit code.
+ Returns: the path to the generated file
"""
- subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE)
+ # Check whether we should write worker logs to disk, in addition to the console
+ extra_log_template_args = {}
+ if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
+ extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
+ dir=data_dir, name=worker_name
+ )
+ # Render and write the file
+ log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
+ convert(
+ "/conf/log.config",
+ log_config_filepath,
+ worker_name=worker_name,
+ **extra_log_template_args,
+ )
+ return log_config_filepath
def main(args, environ):
@@ -567,7 +574,13 @@ def main(args, environ):
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
- start_supervisord()
+ log("Starting supervisord")
+ os.execl(
+ "/usr/local/bin/supervisord",
+ "supervisord",
+ "-c",
+ "/etc/supervisor/supervisord.conf",
+ )
if __name__ == "__main__":
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 6aa48e1919..65570cefbe 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -17,6 +17,7 @@
# Usage
- [Federation](federate.md)
- [Configuration](usage/configuration/README.md)
+ - [Configuration Manual](usage/configuration/config_documentation.md)
- [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
- [Logging Sample Config File](usage/configuration/logging_sample_config.md)
- [Structured Logging](structured_logging.md)
diff --git a/docs/code_style.md b/docs/code_style.md
index ebda6dcc85..db7edcd76b 100644
--- a/docs/code_style.md
+++ b/docs/code_style.md
@@ -6,60 +6,36 @@ The Synapse codebase uses a number of code formatting tools in order to
quickly and automatically check for formatting (and sometimes logical)
errors in code.
-The necessary tools are detailed below.
+The necessary tools are:
-First install them with:
+- [black](https://black.readthedocs.io/en/stable/), a source code formatter;
+- [isort](https://pycqa.github.io/isort/), which organises each file's imports;
+- [flake8](https://flake8.pycqa.org/en/latest/), which can spot common errors; and
+- [mypy](https://mypy.readthedocs.io/en/stable/), a type checker.
+
+Install them with:
```sh
pip install -e ".[lint,mypy]"
```
-- **black**
-
- The Synapse codebase uses [black](https://pypi.org/project/black/)
- as an opinionated code formatter, ensuring all comitted code is
- properly formatted.
-
- Have `black` auto-format your code (it shouldn't change any
- functionality) with:
-
- ```sh
- black .
- ```
-
-- **flake8**
-
- `flake8` is a code checking tool. We require code to pass `flake8`
- before being merged into the codebase.
-
- Check all application and test code with:
+The easiest way to run the lints is to invoke the linter script as follows.
- ```sh
- flake8 .
- ```
-
-- **isort**
-
- `isort` ensures imports are nicely formatted, and can suggest and
- auto-fix issues such as double-importing.
-
- Auto-fix imports with:
-
- ```sh
- isort .
- ```
+```sh
+scripts-dev/lint.sh
+```
It's worth noting that modern IDEs and text editors can run these tools
automatically on save. It may be worth looking into whether this
functionality is supported in your editor for a more convenient
-development workflow. It is not, however, recommended to run `flake8` on
-save as it takes a while and is very resource intensive.
+development workflow. It is not, however, recommended to run `flake8` or `mypy`
+on save as they take a while and can be very resource intensive.
## General rules
- **Naming**:
- - Use camel case for class and type names
- - Use underscores for functions and variables.
+ - Use `CamelCase` for class and type names
+ - Use underscores for `function_names` and `variable_names`.
- **Docstrings**: should follow the [google code
style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
See the
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 0d9cf60196..3b5c774018 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -48,19 +48,28 @@ can find many good git tutorials on the web.
# 4. Install the dependencies
-Once you have installed Python 3 and added the source, please open a terminal and
-setup a *virtualenv*, as follows:
+Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
+and development environment. Once you have installed Python 3 and added the
+source, you should install `poetry`.
+Of their installation methods, we recommend
+[installing `poetry` using `pipx`](https://python-poetry.org/docs/#installing-with-pipx),
+
+```shell
+pip install --user pipx
+pipx install poetry
+```
+
+but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
+for other installation methods.
+
+Next, open a terminal and install dependencies as follows:
```sh
cd path/where/you/have/cloned/the/repository
-python3 -m venv ./env
-source ./env/bin/activate
-pip install wheel
-pip install -e ".[all,dev]"
-pip install tox
+poetry install --extras all
```
-This will install the developer dependencies for the project.
+This will install the runtime and developer dependencies for the project.
# 5. Get in touch.
@@ -117,11 +126,10 @@ The linters look at your code and do two things:
- ensure that your code follows the coding style adopted by the project;
- catch a number of errors in your code.
-The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies).
+The linter takes no time at all to run as soon as you've [downloaded the dependencies](#4-install-the-dependencies).
```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh
+poetry run ./scripts-dev/lint.sh
```
Note that this script *will modify your files* to fix styling errors.
@@ -131,15 +139,13 @@ If you wish to restrict the linters to only the files changed since the last com
(much faster!), you can instead run:
```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh -d
+poetry run ./scripts-dev/lint.sh -d
```
Or if you know exactly which files you wish to lint, you can instead run:
```sh
-source ./env/bin/activate
-./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
+poetry run ./scripts-dev/lint.sh path/to/file1.py path/to/file2.py path/to/folder
```
## Run the unit tests (Twisted trial).
@@ -148,16 +154,14 @@ The unit tests run parts of Synapse, including your changes, to see if anything
was broken. They are slower than the linters but will typically catch more errors.
```sh
-source ./env/bin/activate
-trial tests
+poetry run trial tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
```sh
-source ./env/bin/activate
-trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
+poetry run trial tests.rest.admin.test_room tests.handlers.test_admin.ExfiltrateData.test_invite
```
If your tests fail, you may wish to look at the logs (the default log level is `ERROR`):
@@ -169,7 +173,7 @@ less _trial_temp/test.log
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
```sh
-SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
+SYNAPSE_TEST_LOG_LEVEL=DEBUG poetry run trial tests
```
By default, tests will use an in-memory SQLite database for test data. For additional
@@ -180,7 +184,7 @@ database state to be stored in a file named `test.db` under the trial process'
working directory. Typically, this ends up being `_trial_temp/test.db`. For example:
```sh
-SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests
+SYNAPSE_TEST_PERSIST_SQLITE_DB=1 poetry run trial tests
```
The database file can then be inspected with:
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
new file mode 100644
index 0000000000..8ef7d357d8
--- /dev/null
+++ b/docs/development/dependencies.md
@@ -0,0 +1,239 @@
+# Managing dependencies with Poetry
+
+This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/).
+
+# Background
+
+Synapse uses a variety of third-party Python packages to function as a homeserver.
+Some of these are direct dependencies, listed in `pyproject.toml` under the
+`[tool.poetry.dependencies]` section. The rest are transitive dependencies (the
+things that our direct dependencies themselves depend on, and so on recursively.)
+
+We maintain a locked list of all our dependencies (transitive included) so that
+we can track exactly which version of each dependency appears in a given release.
+See [here](https://github.com/matrix-org/synapse/issues/11537#issue-1074469665)
+for discussion of why we wanted this for Synapse. We chose to use
+[`poetry`](https://python-poetry.org/) to manage this locked list; see
+[this comment](https://github.com/matrix-org/synapse/issues/11537#issuecomment-1015975819)
+for the reasoning.
+
+The locked dependencies get included in our "self-contained" releases: namely,
+our docker images and our debian packages. We also use the locked dependencies
+in development and our continuous integration.
+
+Separately, our "broad" dependencies—the version ranges specified in
+`pyproject.toml`—are included as metadata in our "sdists" and "wheels" [uploaded
+to PyPI](https://pypi.org/project/matrix-synapse). Installing from PyPI or from
+the Synapse source tree directly will _not_ use the locked dependencies; instead,
+they'll pull in the latest version of each package available at install time.
+
+## Example dependency
+
+An example may help. We have a broad dependency on
+[`phonenumbers`](https://pypi.org/project/phonenumbers/), as declared in
+this snippet from pyproject.toml [as of Synapse 1.57](
+https://github.com/matrix-org/synapse/blob/release-v1.57/pyproject.toml#L133
+):
+
+```toml
+[tool.poetry.dependencies]
+# ...
+phonenumbers = ">=8.2.0"
+```
+
+In our lockfile this is
+[pinned](https://github.com/matrix-org/synapse/blob/dfc7646504cef3e4ff396c36089e1c6f1b1634de/poetry.lock#L679-L685)
+to version 8.12.44, even though
+[newer versions are available](https://pypi.org/project/phonenumbers/#history).
+
+```toml
+[[package]]
+name = "phonenumbers"
+version = "8.12.44"
+description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
+category = "main"
+optional = false
+python-versions = "*"
+```
+
+The lockfile also includes a
+[cryptographic checksum](https://github.com/matrix-org/synapse/blob/release-v1.57/poetry.lock#L2178-L2181)
+of the sdists and wheels provided for this version of `phonenumbers`.
+
+```toml
+[metadata.files]
+# ...
+phonenumbers = [
+ {file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
+ {file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
+]
+```
+
+We can see this pinned version inside the docker image for that release:
+
+```
+$ docker pull matrixdotorg/synapse:v1.57.0
+...
+$ docker run --entrypoint pip matrixdotorg/synapse:v1.57.0 show phonenumbers
+Name: phonenumbers
+Version: 8.12.44
+Summary: Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.
+Home-page: https://github.com/daviddrysdale/python-phonenumbers
+Author: David Drysdale
+Author-email: dmd@lurklurk.org
+License: Apache License 2.0
+Location: /usr/local/lib/python3.9/site-packages
+Requires:
+Required-by: matrix-synapse
+```
+
+Whereas the wheel metadata just contains the broad dependencies:
+
+```
+$ cd /tmp
+$ wget https://files.pythonhosted.org/packages/ca/5e/d722d572cc5b3092402b783d6b7185901b444427633bd8a6b00ea0dd41b7/matrix_synapse-1.57.0rc1-py3-none-any.whl
+...
+$ unzip -c matrix_synapse-1.57.0rc1-py3-none-any.whl matrix_synapse-1.57.0rc1.dist-info/METADATA | grep phonenumbers
+Requires-Dist: phonenumbers (>=8.2.0)
+```
+
+# Tooling recommendation: direnv
+
+[`direnv`](https://direnv.net/) is a tool for activating environments in your
+shell inside a given directory. Its support for poetry is unofficial (a
+community wiki recipe only), but works solidly in our experience. We thoroughly
+recommend it for daily use. To use it:
+
+1. [Install `direnv`](https://direnv.net/docs/installation.html) - it's likely
+ packaged for your system already.
+2. Teach direnv about poetry. The [shell config here](https://github.com/direnv/direnv/wiki/Python#poetry)
+   needs to be added to `~/.config/direnv/direnvrc` (or more generally
+   `$XDG_CONFIG_HOME/direnv/direnvrc`); a sketch of that recipe follows below.
+3. Mark the synapse checkout as a poetry project: `echo layout poetry > .envrc`.
+4. Convince yourself that you trust this `.envrc` configuration and project.
+ Then formally confirm this to `direnv` by running `direnv allow`.
+
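+For reference, the recipe from step 2 looks roughly like the following sketch.
+(The canonical version lives on the direnv wiki and may differ in detail;
+`PATH_add` and `log_error` come from direnv's stdlib.)
+
+```shell
+# Sketch of ~/.config/direnv/direnvrc poetry support, based on the community
+# wiki recipe. direnv calls layout_poetry() when an .envrc says `layout poetry`.
+layout_poetry() {
+  if [[ ! -f pyproject.toml ]]; then
+    log_error 'No pyproject.toml found'
+    exit 2
+  fi
+  # Ask poetry where its virtualenv lives and put it on the PATH.
+  local venv
+  venv=$(poetry env info --path)
+  export VIRTUAL_ENV=$venv
+  export POETRY_ACTIVE=1
+  PATH_add "$venv/bin"
+}
+```
+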
+Then whenever you navigate to the synapse checkout, you should be able to run
+e.g. `mypy` instead of `poetry run mypy`; `python` instead of
+`poetry run python`; and your shell commands will automatically run in the
+context of poetry's venv, without having to run `poetry shell` beforehand.
+
+
+# How do I...
+
+## ...reset my venv to the locked environment?
+
+```shell
+poetry install --extras all --remove-untracked
+```
+
+## ...run a command in the `poetry` virtualenv?
+
+Use `poetry run cmd args` when you need the python virtualenv context.
+To avoid typing `poetry run` all the time, you can run `poetry shell`
+to start a new shell in the poetry virtualenv context. Within `poetry shell`,
+`python`, `pip`, `mypy`, `trial`, etc. are all run inside the project virtualenv
+and isolated from the rest of the system.
+
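+For example (illustrative only):
+
+```shell
+# One-off command inside the project virtualenv:
+poetry run trial tests
+
+# Or start a subshell with the virtualenv active:
+poetry shell
+trial tests
+exit  # leave the poetry shell again
+```
+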
+Roughly speaking, the translation from a traditional virtualenv is:
+- `env/bin/activate` -> `poetry shell`, and
+- `deactivate` -> close the terminal (Ctrl-D, `exit`, etc.)
+
+See also the direnv recommendation above, which makes `poetry run` and
+`poetry shell` unnecessary.
+
+
+## ...inspect the `poetry` virtualenv?
+
+Some suggestions:
+
+```shell
+# Current env only
+poetry env info
+# All envs: this allows you to have e.g. a poetry managed venv for Python 3.7,
+# and another for Python 3.10.
+poetry env list --full-path
+poetry run pip list
+```
+
+Note that `poetry show` describes the abstract *lock file* rather than your
+on-disk environment. With that said, `poetry show --tree` can sometimes be
+useful.
+
+
+## ...add a new dependency?
+
+Either:
+- manually update `pyproject.toml`; then `poetry lock --no-update`; or else
+- `poetry add packagename`. See `poetry add --help`; note the `--dev`,
+ `--extras` and `--optional` flags in particular.
+ - **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39).
+
+Include the updated `pyproject.toml` and `poetry.lock` files in your commit.
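+
+A hypothetical session (the package name is invented for illustration):
+
+```shell
+poetry add examplepkg
+git add pyproject.toml poetry.lock
+git commit -m "Add examplepkg as a dependency"
+```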
+
+## ...remove a dependency?
+
+This is not done often and is untested, but
+
+```shell
+poetry remove packagename
+```
+
+ought to do the trick. Alternatively, manually update `pyproject.toml` and
+`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock`
+files in your commit.
+
+## ...update the version range for an existing dependency?
+
+Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`.
+Include the updated `pyproject.toml` and `poetry.lock` in your commit.
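+
+For instance (hypothetical package and version ranges):
+
+```shell
+# After editing pyproject.toml to widen a range, e.g. changing
+#   examplepkg = ">=1.0"
+# to
+#   examplepkg = ">=1.0,<3.0"
+poetry lock --no-update
+git add pyproject.toml poetry.lock
+```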
+
+## ...update a dependency in the locked environment?
+
+Use
+
+```shell
+poetry update packagename
+```
+
+to use the latest version of `packagename` in the locked environment, without
+affecting the broad dependencies listed in the wheel.
+
+There doesn't seem to be a way to do this whilst locking a _specific_ version of
+`packagename`. We can work around this (crudely) as follows:
+
+```shell
+poetry add packagename==1.2.3
+# This should update pyproject.toml and poetry.lock.
+
+# Now undo the changes to pyproject.toml. For example
+# git restore pyproject.toml
+
+# Get poetry to recompute the content-hash of pyproject.toml without changing
+# the locked package versions.
+poetry lock --no-update
+```
+
+Either way, include the updated `poetry.lock` file in your commit.
+
+## ...export a `requirements.txt` file?
+
+```shell
+poetry export --extras all
+```
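+
+For instance, a pinned-install flow along the lines of what
+`debian/build_virtualenv` above does might be:
+
+```shell
+poetry export --extras all --extras test -o exported_requirements.txt
+# Install with --no-deps so that only the pinned versions get installed
+# (and to avoid https://github.com/pypa/pip/issues/9644).
+pip install --no-deps -r exported_requirements.txt
+```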
+
+Be wary of bugs in `poetry export` and `pip install -r requirements.txt`.
+
+Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may
+be required.
+
+## ...build a test wheel?
+
+I usually use
+
+```shell
+poetry run pip install build && poetry run python -m build
+```
+
+because [`build`](https://github.com/pypa/build) is a standardish tool which
+doesn't require poetry. (It's what we use in CI too.) However, you could try
+`poetry build` too.
diff --git a/docs/systemd-with-workers/README.md b/docs/systemd-with-workers/README.md
index b160d93528..d516501085 100644
--- a/docs/systemd-with-workers/README.md
+++ b/docs/systemd-with-workers/README.md
@@ -10,15 +10,15 @@ See the folder [system](https://github.com/matrix-org/synapse/tree/develop/docs/
for the systemd unit files.
The folder [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
-contains an example configuration for the `federation_reader` worker.
+contains an example configuration for the `generic_worker` worker.
## Synapse configuration files
See [the worker documentation](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly.
-Below is a sample `federation_reader` worker configuration file.
+Below is a sample `generic_worker` worker configuration file.
```yaml
-{{#include workers/federation_reader.yaml}}
+{{#include workers/generic_worker.yaml}}
```
Systemd manages daemonization itself, so ensure that none of the configuration
@@ -61,9 +61,9 @@ systemctl stop matrix-synapse.target
# Restart the master alone
systemctl start matrix-synapse.service
-# Restart a specific worker (eg. federation_reader); the master is
+# Restart a specific worker (eg. generic_worker); the master is
# unaffected by this.
-systemctl restart matrix-synapse-worker@federation_reader.service
+systemctl restart matrix-synapse-worker@generic_worker.service
# Add a new worker (assuming all configs are set up already)
systemctl enable matrix-synapse-worker@federation_writer.service
diff --git a/docs/systemd-with-workers/workers/federation_reader.yaml b/docs/systemd-with-workers/workers/federation_reader.yaml
deleted file mode 100644
index 13e69e62c9..0000000000
--- a/docs/systemd-with-workers/workers/federation_reader.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-worker_app: synapse.app.federation_reader
-worker_name: federation_reader1
-
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
-worker_listeners:
- - type: http
- port: 8011
- resources:
- - names: [federation]
-
-worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml
new file mode 100644
index 0000000000..8561e2cda5
--- /dev/null
+++ b/docs/systemd-with-workers/workers/generic_worker.yaml
@@ -0,0 +1,13 @@
+worker_app: synapse.app.generic_worker
+worker_name: generic_worker1
+
+worker_replication_host: 127.0.0.1
+worker_replication_http_port: 9093
+
+worker_listeners:
+ - type: http
+ port: 8011
+ resources:
+ - names: [client, federation]
+
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 023872490e..3a8aeb0395 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -19,32 +19,36 @@ this document.
packages](setup/installation.md#prebuilt-packages), you will need to follow the
normal process for upgrading those packages.
+- If Synapse was installed using pip then upgrade to the latest
+ version by running:
+
+ ```bash
+ pip install --upgrade matrix-synapse
+ ```
+
- If Synapse was installed from source, then:
- 1. Activate the virtualenv before upgrading. For example, if
- Synapse is installed in a virtualenv in `~/synapse/env` then
+ 1. Obtain the latest version of the source code. Git users can run
+ `git pull` to do this.
+
+ 2. If you're running Synapse in a virtualenv, make sure to activate it before
+ upgrading. For example, if Synapse is installed in a virtualenv in `~/synapse/env` then
run:
```bash
source ~/synapse/env/bin/activate
+ pip install --upgrade .
```
+ Include any relevant extras between square brackets, e.g. `pip install --upgrade ".[postgres,oidc]"`.
- 2. If Synapse was installed using pip then upgrade to the latest
- version by running:
-
- ```bash
- pip install --upgrade matrix-synapse
- ```
-
- If Synapse was installed using git then upgrade to the latest
- version by running:
-
+ 3. If you're using `poetry` to manage a Synapse installation, run:
```bash
- git pull
- pip install --upgrade .
+ poetry install
```
+ Include any relevant extras with `--extras`, e.g. `poetry install --extras postgres --extras oidc`.
+ It's probably easiest to run `poetry install --extras all`.
- 3. Restart Synapse:
+ 4. Restart Synapse:
```bash
synctl restart
@@ -85,6 +89,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.58.0
+
+## Groups/communities feature has been disabled by default
+
+The non-standard groups/communities feature in Synapse has been disabled by default
+and will be removed in Synapse v1.61.0.
+
# Upgrading to v1.57.0
## Changes to database schema for application services
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
new file mode 100644
index 0000000000..9c864af6ec
--- /dev/null
+++ b/docs/usage/configuration/config_documentation.md
@@ -0,0 +1,3412 @@
+# Configuring Synapse
+
+This is intended as a guide to the Synapse configuration. The behaviour of a
+Synapse instance can be modified through the many configuration settings
+documented here: each config option is explained, including what the default is,
+how to change the default, and what sort of behaviour the setting governs.
+Also included is an example configuration for each setting. If you don't want to spend a lot of time
+thinking about options, the config as generated sets sensible defaults for all values. Do note however that the
+database defaults to SQLite, which is not recommended for production usage. You can read more on this subject
+[here](../../setup/installation.md#using-postgresql).
+
+## Config Conventions
+
+Configuration options that take a time period can be set using a number
+followed by a letter. Letters have the following meanings:
+
+* `s` = second
+* `m` = minute
+* `h` = hour
+* `d` = day
+* `w` = week
+* `y` = year
+
+For example, setting `redaction_retention_period: 5m` would remove redacted
+messages from the database after 5 minutes, rather than 5 months.
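+
+A few more hypothetical values in the same format, using options documented
+later in this manual:
+
+```yaml
+session_lifetime: 90m            # ninety minutes
+user_ips_max_age: 14d            # fourteen days
+redaction_retention_period: 1y   # one year
+```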
+
+### YAML
+The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules
+apply if you want your config file to be read properly. A few helpful things to know:
+* `#` before any option in the config will comment out that setting and either a default (if available) will
+ be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and
+ applied, but in example #2 the setting will not be read and a default will be applied.
+
+ Example #1:
+ ```yaml
+ pid_file: DATADIR/homeserver.pid
+ ```
+ Example #2:
+ ```yaml
+ #pid_file: DATADIR/homeserver.pid
+ ```
+* Indentation matters! The indentation before a setting
+ will determine whether a given setting is read as part of another
+ setting, or considered on its own. Thus, in example #1, the `enabled` setting
+ is read as a sub-option of the `presence` setting, and will be properly applied.
+
+ However, the lack of indentation before the `enabled` setting in example #2 means
+ that when reading the config, Synapse will consider both `presence` and `enabled` as
+ different settings. In this case, `presence` has no value, and thus a default applied, and `enabled`
+ is an option that Synapse doesn't recognize and thus ignores.
+
+ Example #1:
+ ```yaml
+ presence:
+   enabled: false
+ ```
+ Example #2:
+ ```yaml
+ presence:
+ enabled: false
+ ```
+ In this manual, all top-level settings (ones with no indentation) are identified
+ at the beginning of their section (i.e. "Config option: `example_setting`") and
+ the sub-options, if any, are identified and listed in the body of the section.
+ In addition, each setting has an example of its usage, with the proper indentation
+ shown.
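+
+Putting the two conventions together, a short hypothetical fragment: here
+`enabled` is indented and therefore read as a sub-option of `presence`, while
+`pid_file` is commented out and so falls back to its default.
+
+```yaml
+presence:
+  enabled: false
+#pid_file: DATADIR/homeserver.pid
+```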
+
+
+## Modules
+
+Server admins can expand Synapse's functionality with external modules.
+
+See [here](../../modules/index.md) for more
+documentation on how to configure or create custom modules for Synapse.
+
+
+---
+Config option: `modules`
+
+Use the `module` sub-option to add modules under this option to extend functionality.
+Each `module` entry has a sub-option, `config`, which can be used to define
+configuration for that module.
+
+Defaults to none.
+
+Example configuration:
+```yaml
+modules:
+ - module: my_super_module.MySuperClass
+ config:
+ do_thing: true
+ - module: my_other_super_module.SomeClass
+ config: {}
+```
+---
+## Server ##
+
+Define your homeserver name and other base options.
+
+---
+Config option: `server_name`
+
+This sets the public-facing domain of the server.
+
+The `server_name` will appear at the end of usernames and room addresses
+created on your server. For example, if the `server_name` was example.com,
+usernames on your server would be in the format `@user:example.com`.
+
+In most cases you should avoid using a Matrix-specific subdomain such as
+matrix.example.com or synapse.example.com as the `server_name` for the same
+reasons you wouldn't use user@email.example.com as your email address.
+See [here](../../delegate.md)
+for information on how to host Synapse on a subdomain while preserving
+a clean `server_name`.
+
+The `server_name` cannot be changed later so it is important to
+configure this correctly before you start Synapse. It should be all
+lowercase and may contain an explicit port.
+
+There is no default for this option.
+
+Example configuration #1:
+```yaml
+server_name: matrix.org
+```
+Example configuration #2:
+```yaml
+server_name: localhost:8080
+```
+---
+Config option: `pid_file`
+
+When running Synapse as a daemon, the file to store the pid in. Defaults to none.
+
+Example configuration:
+```yaml
+pid_file: DATADIR/homeserver.pid
+```
+---
+Config option: `web_client_location`
+
+The absolute URL to the web client which `/` will redirect to. Defaults to none.
+
+Example configuration:
+```yaml
+web_client_location: https://riot.example.com/
+```
+---
+Config option: `public_baseurl`
+
+The public-facing base URL that clients use to access this Homeserver (not
+including _matrix/...). This is the same URL a user might enter into the
+'Custom Homeserver URL' field on their client. If you use Synapse with a
+reverse proxy, this should be the URL to reach Synapse via the proxy.
+Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
+'listeners' below).
+
+Defaults to `https://<server_name>/`.
+
+Example configuration:
+```yaml
+public_baseurl: https://example.com/
+```
+---
+Config option: `serve_server_wellknown`
+
+By default, other servers will try to reach our server on port 8448, which can
+be inconvenient in some environments.
+
+Provided `https://<server_name>/` on port 443 is routed to Synapse, this
+option configures Synapse to serve a file at `https://<server_name>/.well-known/matrix/server`.
+This will tell other servers to send traffic to port 443 instead.
+
+This option currently defaults to false.
+
+See [here](../../delegate.md) for more
+information.
+
+Example configuration:
+```yaml
+serve_server_wellknown: true
+```
+---
+Config option: `soft_file_limit`
+
+Set the soft limit on the number of file descriptors Synapse can use.
+Zero is used to indicate that Synapse should set the soft limit to the hard limit.
+Defaults to 0.
+
+Example configuration:
+```yaml
+soft_file_limit: 3
+```
+---
+Config option: `presence`
+
+Presence tracking allows users to see the state (e.g. online/offline)
+of other local and remote users. Set the `enabled` sub-option to false to
+disable presence tracking on this homeserver. Defaults to true.
+This option replaces the previous top-level `use_presence` option.
+
+Example configuration:
+```yaml
+presence:
+ enabled: false
+```
+---
+Config option: `require_auth_for_profile_requests`
+
+Whether to require authentication to retrieve profile data (avatars, display names) of other
+users through the client API. Defaults to false. Note that profile data is also available
+via the federation API, unless `allow_profile_lookup_over_federation` is set to false.
+
+Example configuration:
+```yaml
+require_auth_for_profile_requests: true
+```
+---
+Config option: `limit_profile_requests_to_users_who_share_rooms`
+
+Use this option to require a user to share a room with another user in order
+to retrieve their profile information. Only checked on Client-Server
+requests. Profile requests from other servers should be checked by the
+requesting server. Defaults to false.
+
+Example configuration:
+```yaml
+limit_profile_requests_to_users_who_share_rooms: true
+```
+---
+Config option: `include_profile_data_on_invite`
+
+Use this option to prevent a user's profile data from being retrieved and
+displayed in a room until they have joined it. By default, a user's
+profile data is included in an invite event, regardless of the values
+of the above two settings, and whether or not the users share a server.
+Defaults to true.
+
+Example configuration:
+```yaml
+include_profile_data_on_invite: false
+```
+---
+Config option: `allow_public_rooms_without_auth`
+
+If set to true, removes the need for authentication to access the server's
+public rooms directory through the client API, meaning that anyone can
+query the room directory. Defaults to false.
+
+Example configuration:
+```yaml
+allow_public_rooms_without_auth: true
+```
+---
+Config option: `allow_public_rooms_over_federation`
+
+If set to true, allows any other homeserver to fetch the server's public
+rooms directory via federation. Defaults to false.
+
+Example configuration:
+```yaml
+allow_public_rooms_over_federation: true
+```
+---
+Config option: `default_room_version`
+
+The default room version for newly created rooms on this server.
+
+Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions)
+
+For example, for room version 1, `default_room_version` should be set
+to "1".
+
+Currently defaults to "9".
+
+Example configuration:
+```yaml
+default_room_version: "8"
+```
+---
+Config option: `gc_thresholds`
+
+The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined.
+Defaults to none.
+
+Example configuration:
+```yaml
+gc_thresholds: [700, 10, 10]
+```
+---
+Config option: `gc_min_interval`
+
+The minimum time in seconds between each GC for a generation, regardless of
+the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]`
+indicates that a second must pass between consecutive generation 0 GCs, etc.
+
+Defaults to `[1s, 10s, 30s]`.
+
+Example configuration:
+```yaml
+gc_min_interval: [0.5s, 30s, 1m]
+```
+---
+Config option: `filter_timeline_limit`
+
+Set the limit on the returned events in the timeline in the get
+and sync operations. Defaults to 100. A value of -1 means no upper limit.
+
+Example configuration:
+```yaml
+filter_timeline_limit: 5000
+```
+---
+Config option: `block_non_admin_invites`
+
+Whether room invites to users on this server should be blocked
+(except those sent by local server admins). Defaults to false.
+
+Example configuration:
+```yaml
+block_non_admin_invites: true
+```
+---
+Config option: `enable_search`
+
+If set to false, new messages will not be indexed for searching and users
+will receive errors when searching for messages. Defaults to true.
+
+Example configuration:
+```yaml
+enable_search: false
+```
+---
+Config option: `ip_range_blacklist`
+
+This option prevents outgoing requests from being sent to the specified blacklisted IP address
+CIDR ranges. If this option is not specified then it defaults to private IP
+address ranges (see the example below).
+
+The blacklist applies to the outbound requests for federation, identity servers,
+push servers, and for checking key validity for third-party invite events.
+
+(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+listed here, since they correspond to unroutable addresses.)
+
+This option replaces `federation_ip_range_blacklist` in Synapse v1.25.0.
+
+Note: The value is ignored when an HTTP proxy is in use.
+
+Example configuration:
+```yaml
+ip_range_blacklist:
+ - '127.0.0.0/8'
+ - '10.0.0.0/8'
+ - '172.16.0.0/12'
+ - '192.168.0.0/16'
+ - '100.64.0.0/10'
+ - '192.0.0.0/24'
+ - '169.254.0.0/16'
+ - '192.88.99.0/24'
+ - '198.18.0.0/15'
+ - '192.0.2.0/24'
+ - '198.51.100.0/24'
+ - '203.0.113.0/24'
+ - '224.0.0.0/4'
+ - '::1/128'
+ - 'fe80::/10'
+ - 'fc00::/7'
+ - '2001:db8::/32'
+ - 'ff00::/8'
+ - 'fec0::/10'
+```
+---
+Config option: `ip_range_whitelist`
+
+List of IP address CIDR ranges that should be allowed for federation,
+identity servers, push servers, and for checking key validity for
+third-party invite events. This is useful for specifying exceptions to
+wide-ranging blacklisted target IP ranges - e.g. for communication with
+a push server only visible in your network.
+
+This whitelist overrides `ip_range_blacklist` and defaults to an empty
+list.
+
+Example configuration:
+```yaml
+ip_range_whitelist:
+ - '192.168.1.1'
+```
+---
+Config option: `listeners`
+
+List of ports that Synapse should listen on, their purpose and their
+configuration.
+
+Sub-options for each listener include:
+
+* `port`: the TCP port to bind to.
+
+* `bind_addresses`: a list of local addresses to listen on. The default is
+ 'all local interfaces'.
+
+* `type`: the type of listener. Normally `http`, but other valid options are:
+
+ * `manhole`: (see the docs [here](../../manhole.md)),
+
+ * `metrics`: (see the docs [here](../../metrics-howto.md)),
+
+ * `replication`: (see the docs [here](../../workers.md)).
+
+* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in `tls_private_key_path` / `tls_certificate_path`.
+
+* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
+ behind a reverse-proxy.
+
+* `resources`: Only valid for an 'http' listener. A list of resources to host
+ on this port. Sub-options for each resource are:
+
+ * `names`: a list of names of HTTP resources. See below for a list of valid resource names.
+
+ * `compress`: set to true to enable HTTP compression for this resource.
+
+* `additional_resources`: Only valid for an 'http' listener. A map of
+ additional endpoints which should be loaded via dynamic modules.
+
+Valid resource names are:
+
+* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies 'media' and 'static'.
+
+* `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more.
+
+* `federation`: the server-server API (/_matrix/federation). Also implies `media`, `keys`, `openid`.
+
+* `keys`: the key discovery API (/_matrix/keys).
+
+* `media`: the media API (/_matrix/media).
+
+* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
+
+* `openid`: OpenID authentication. See [here](../../openid.md).
+
+* `replication`: the HTTP replication API (/_synapse/replication). See [here](../../workers.md).
+
+* `static`: static resources under synapse/static (/_matrix/static). (Mostly useful for 'fallback authentication'.)
+
+Example configuration #1:
+```yaml
+listeners:
+ # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
+ #
+ # (Note that you will also need to give Synapse a TLS key and certificate: see the TLS section
+ # below.)
+ #
+ - port: 8448
+ type: http
+ tls: true
+ resources:
+ - names: [client, federation]
+```
+Example configuration #2:
+```yaml
+listeners:
+ # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
+ # that unwraps TLS.
+ #
+ # If you plan to use a reverse proxy, please see
+ # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
+ #
+ - port: 8008
+ tls: false
+ type: http
+ x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
+
+ resources:
+ - names: [client, federation]
+ compress: false
+
+ # example additional_resources:
+ additional_resources:
+ "/_matrix/my/custom/endpoint":
+ module: my_module.CustomRequestHandler
+ config: {}
+
+ # Turn on the twisted ssh manhole service on localhost on the given
+ # port.
+ - port: 9000
+ bind_addresses: ['::1', '127.0.0.1']
+ type: manhole
+```
+---
+Config option: `manhole_settings`
+
+Connection settings for the manhole. You can find more information
+on the manhole [here](../../manhole.md). Manhole sub-options include:
+* `username`: the username for the manhole. This defaults to 'matrix'.
+* `password`: the password for the manhole. This defaults to 'rabbithole'.
+* `ssh_priv_key_path` and `ssh_pub_key_path`: The private and public SSH key pair used to encrypt the manhole traffic.
+ If these are left unset, then hardcoded and non-secret keys are used,
+ which could allow traffic to be intercepted if sent over a public network.
+
+Example configuration:
+```yaml
+manhole_settings:
+ username: manhole
+ password: mypassword
+ ssh_priv_key_path: CONFDIR/id_rsa
+ ssh_pub_key_path: CONFDIR/id_rsa.pub
+```
+---
+Config option: `dummy_events_threshold`
+
+Forward extremities can build up in a room due to networking delays between
+homeservers. Once this happens in a large room, calculation of the state of
+that room can become quite expensive. To mitigate this, once the number of
+forward extremities reaches a given threshold, Synapse will send an
+`org.matrix.dummy_event` event, which will reduce the forward extremities
+in the room.
+
+This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent.
+The default value is 10.
+
+Example configuration:
+```yaml
+dummy_events_threshold: 5
+```
+---
+## Homeserver blocking ##
+Useful options for Synapse admins.
+
+---
+
+Config option: `admin_contact`
+
+How to reach the server admin, used in `ResourceLimitError`. Defaults to none.
+
+Example configuration:
+```yaml
+admin_contact: 'mailto:admin@server.com'
+```
+---
+Config option: `hs_disabled` and `hs_disabled_message`
+
+Blocks users from connecting to the homeserver and provides a human-readable reason
+why the connection was blocked. Defaults to false.
+
+Example configuration:
+```yaml
+hs_disabled: true
+hs_disabled_message: 'Reason for why the HS is blocked'
+```
+---
+Config option: `limit_usage_by_mau`
+
+This option disables/enables monthly active user blocking. Used in cases where the admin or
+server owner wants to limit the number of monthly active users. When enabled and a limit is
+reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`.
+Defaults to false. If this is enabled, a value for `max_mau_value` must also be set.
+
+Example configuration:
+```yaml
+limit_usage_by_mau: true
+```
+---
+Config option: `max_mau_value`
+
+This option sets the hard limit of monthly active users above which the server will start
+blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0.
+
+Example configuration:
+```yaml
+max_mau_value: 50
+```
+---
+Config option: `mau_trial_days`
+
+The option `mau_trial_days` is a means to add a grace period for active users. It
+means that users must be active for the specified number of days before they
+are counted as active, guarding against the case where lots of users
+sign up in a short space of time and never return after their initial
+session. Defaults to 0.
+
+Example configuration:
+```yaml
+mau_trial_days: 5
+```
+---
+Config option: `mau_limit_alerting`
+
+The option `mau_limit_alerting` is a means of limiting client-side alerting
+should the mau limit be reached. This is useful for small instances
+where the admin has 5 mau seats (say) for 5 specific people and no
+interest in increasing the mau limit further. Defaults to true, which
+means that alerting is enabled.
+
+Example configuration:
+```yaml
+mau_limit_alerting: false
+```
+---
+Config option: `mau_stats_only`
+
+If enabled, the metrics for the number of monthly active users will
+be populated; however, no one will be limited based on these numbers. If `limit_usage_by_mau`
+is true, this is implied to be true. Defaults to false.
+
+Example configuration:
+```yaml
+mau_stats_only: true
+```
+---
+Config option: `mau_limit_reserved_threepids`
+
+Sometimes the server admin will want to ensure certain accounts are
+never blocked by mau checking. These accounts are specified by this option.
+Defaults to none. Add accounts by specifying the `medium` and `address` of the
+reserved threepid (3rd party identifier).
+
+Example configuration:
+```yaml
+mau_limit_reserved_threepids:
+ - medium: 'email'
+ address: 'reserved_user@example.com'
+```
+---
+Config option: `server_context`
+
+This option is used by phonehome stats to group together related servers.
+Defaults to none.
+
+Example configuration:
+```yaml
+server_context: context
+```
+---
+Config option: `limit_remote_rooms`
+
+When this option is enabled, the room "complexity" will be checked before a user
+joins a new remote room. If it is above the complexity limit, the server will
+disallow joining, or will instantly leave the room. This is useful for homeservers
+that are resource-constrained. Options for this setting include:
+* `enabled`: whether this check is enabled. Defaults to false.
+* `complexity`: the limit above which rooms cannot be joined. The default is 1.0.
+* `complexity_error`: override the error which is returned when the room is too complex with a
+ custom message.
+* `admins_can_join`: allow server admins to join complex rooms. Default is false.
+
+Room complexity is an arbitrary measure based on factors such as the number of
+users in the room.
+
+Example configuration:
+```yaml
+limit_remote_rooms:
+ enabled: true
+ complexity: 0.5
+ complexity_error: "I can't let you do that, Dave."
+ admins_can_join: true
+```
+---
+Config option: `require_membership_for_aliases`
+
+Whether to require a user to be in the room to add an alias to it.
+Defaults to true.
+
+Example configuration:
+```yaml
+require_membership_for_aliases: false
+```
+---
+Config option: `allow_per_room_profiles`
+
+Whether to allow per-room membership profiles through the sending of membership
+events with profile information that differs from the target's global profile.
+Defaults to true.
+
+Example configuration:
+```yaml
+allow_per_room_profiles: false
+```
+---
+Config option: `max_avatar_size`
+
+The largest permissible file size in bytes for a user avatar. Defaults to no restriction.
+Use M for MB and K for KB.
+
+Note that user avatar changes will not work if this is set without using Synapse's media repository.
+
+Example configuration:
+```yaml
+max_avatar_size: 10M
+```
+---
+Config option: `allowed_avatar_mimetypes`
+
+The MIME types allowed for user avatars. Defaults to no restriction.
+
+Note that user avatar changes will not work if this is set without
+using Synapse's media repository.
+
+Example configuration:
+```yaml
+allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+```
+---
+Config option: `redaction_retention_period`
+
+How long to keep redacted events in unredacted form in the database. After
+this period redacted events get replaced with their redacted form in the DB.
+
+Defaults to `7d`. Set to `null` to disable.
+
+Example configuration:
+```yaml
+redaction_retention_period: 28d
+```
+---
+Config option: `user_ips_max_age`
+
+How long to track users' last seen time and IPs in the database.
+
+Defaults to `28d`. Set to `null` to disable clearing out of old rows.
+
+Example configuration:
+```yaml
+user_ips_max_age: 14d
+```
+---
+Config option: `request_token_inhibit_3pid_errors`
+
+Inhibits the `/requestToken` endpoints from returning an error that might leak
+information about whether an e-mail address is in use or not on this
+homeserver. Defaults to false.
+Note that for some endpoints the error situation is the e-mail already being
+used, and for others it is the e-mail being unused.
+If this option is enabled, instead of returning an error, these endpoints will
+act as if no error happened and return a fake session ID ('sid') to clients.
+
+Example configuration:
+```yaml
+request_token_inhibit_3pid_errors: true
+```
+---
+Config option: `next_link_domain_whitelist`
+
+A list of domains that the domain portion of `next_link` parameters
+must match.
+
+This parameter is optionally provided by clients while requesting
+validation of an email or phone number, and maps to a link that
+users will be automatically redirected to after validation
+succeeds. Clients can make use of this parameter to aid the validation
+process.
+
+The whitelist is applied whether the homeserver or an identity server is handling validation.
+
+The default value is no whitelist functionality; all domains are
+allowed. Setting this value to an empty list will instead disallow
+all domains.
+
+Example configuration:
+```yaml
+next_link_domain_whitelist: ["matrix.org"]
+```
+---
+Config option: `templates` and `custom_template_directory`
+
+These options define templates to use when generating email or HTML page contents.
+`custom_template_directory` determines the directory in which Synapse will look
+for custom template files.
+If not set, or if a file is not found within the template directory, a default
+template from within the Synapse package will be used.
+
+See [here](../../templates.md) for more
+information about using custom templates.
+
+Example configuration:
+```yaml
+templates:
+ custom_template_directory: /path/to/custom/templates/
+```
+---
+Config option: `retention`
+
+This option and the associated options determine message retention policy at the
+server level.
+
+Room admins and mods can define a retention period for their rooms using the
+`m.room.retention` state event, and server admins can cap this period by setting
+the `allowed_lifetime_min` and `allowed_lifetime_max` config options.
+
+If this feature is enabled, Synapse will regularly look for and purge events
+which are older than the room's maximum retention period. Synapse will also
+filter events received over federation so that events that should have been
+purged are ignored and not stored again.
+
+The message retention policies feature is disabled by default.
+
+This setting has the following sub-options:
+* `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the
+ 'm.room.retention' state event. This option is further specified by the
+ `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the
+ value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet.
+
+* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If
+ set, and the state of a room contains a `m.room.retention` event in its state
+ which contains a `min_lifetime` or a `max_lifetime` that's out of these bounds,
+ Synapse will cap the room's policy to these limits when running purge jobs.
+
+* `purge_jobs` and the associated `shortest_max_lifetime` and `longest_max_lifetime` sub-options:
+ Server admins can define the settings of the background jobs purging the
+ events whose lifetime has expired under the `purge_jobs` section.
+
+ If no configuration is provided for this option, a single job will be set up to delete
+ expired events in every room daily.
+
+ Each job's configuration defines which range of message lifetimes the job
+ takes care of. For example, if `shortest_max_lifetime` is '2d' and
+ `longest_max_lifetime` is '3d', the job will handle purging expired events in
+ rooms whose state defines a `max_lifetime` that's both higher than 2 days, and
+ lower than or equal to 3 days. Both the minimum and the maximum value of a
+ range are optional, e.g. a job with no `shortest_max_lifetime` and a
+ `longest_max_lifetime` of '3d' will handle every room with a retention policy
+ whose `max_lifetime` is lower than or equal to three days.
+
+ The rationale for this per-job configuration is that some rooms might have a
+ retention policy with a low `max_lifetime`, where history needs to be purged
+ of outdated messages on a more frequent basis than for the rest of the rooms
+ (e.g. every 12h), without wanting that purge to be performed by a job that
+ iterates over every room it knows, which could be heavy on the server.
+
+ If any purge job is configured, it is strongly recommended to have at least
+ a single job with neither `shortest_max_lifetime` nor `longest_max_lifetime`
+ set, or one job without `shortest_max_lifetime` and one job without
+ `longest_max_lifetime` set. Otherwise some rooms might be ignored, even if
+ `allowed_lifetime_min` and `allowed_lifetime_max` are set, because capping a
+ room's policy to these values is done after the policies are retrieved from
+ Synapse's database (which is done using the range specified in a purge job's
+ configuration).
+
+Example configuration:
+```yaml
+retention:
+ enabled: true
+ default_policy:
+ min_lifetime: 1d
+ max_lifetime: 1y
+ allowed_lifetime_min: 1d
+ allowed_lifetime_max: 1y
+ purge_jobs:
+ - longest_max_lifetime: 3d
+ interval: 12h
+ - shortest_max_lifetime: 3d
+ interval: 1d
+```
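+
+In the example above, the two purge jobs between them cover every room with a
+retention policy: the first purges rooms whose `max_lifetime` is at most three
+days every 12 hours, and the second handles rooms whose `max_lifetime` is above
+three days once a day.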
+---
+## TLS ##
+
+Options related to TLS.
+
+---
+Config option: `tls_certificate_path`
+
+This option specifies a PEM-encoded X509 certificate for TLS.
+This certificate, as of Synapse 1.0, will need to be a valid and verifiable
+certificate, signed by a recognised Certificate Authority. Defaults to none.
+
+Be sure to use a `.pem` file that includes the full certificate chain including
+any intermediate certificates (for instance, if using certbot, use
+`fullchain.pem` as your certificate, not `cert.pem`).
+
+Example configuration:
+```yaml
+tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt"
+```
+---
+Config option: `tls_private_key_path`
+
+PEM-encoded private key for TLS. Defaults to none.
+
+Example configuration:
+```yaml
+tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
+```
+---
+Config option: `federation_verify_certificates`
+
+Whether to verify TLS server certificates for outbound federation requests.
+
+Defaults to true. To disable certificate verification, set the option to false.
+
+Example configuration:
+```yaml
+federation_verify_certificates: false
+```
+---
+Config option: `federation_client_minimum_tls_version`
+
+The minimum TLS version that will be used for outbound federation requests.
+
+Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note
+that setting this value higher than `1.2` will prevent federation to most
+of the public Matrix network: only configure it to `1.3` if you have an
+entirely private federation setup and you can ensure TLS 1.3 support.
+
+Example configuration:
+```yaml
+federation_client_minimum_tls_version: 1.2
+```
+---
+Config option: `federation_certificate_verification_whitelist`
+
+Skip federation certificate verification on a given whitelist
+of domains.
+
+This setting should only be used in very specific cases, such as
+federation over Tor hidden services and similar. For private networks
+of homeservers, you likely want to use a private CA instead.
+
+Only effective if `federation_verify_certificates` is `true`.
+
+Example configuration:
+```yaml
+federation_certificate_verification_whitelist:
+ - lon.example.com
+ - "*.domain.com"
+ - "*.onion"
+```
+---
+Config option: `federation_custom_ca_list`
+
+List of custom certificate authorities for federation traffic.
+
+This setting should only normally be used within a private network of
+homeservers.
+
+Note that this list will replace those that are provided by your
+operating environment. Certificates must be in PEM format.
+
+Example configuration:
+```yaml
+federation_custom_ca_list:
+ - myCA1.pem
+ - myCA2.pem
+ - myCA3.pem
+```
+---
+## Federation ##
+
+Options related to federation.
+
+---
+Config option: `federation_domain_whitelist`
+
+Restrict federation to the given whitelist of domains.
+N.B. we recommend also firewalling your federation listener to limit
+inbound federation traffic as early as possible, rather than relying
+purely on this application-layer restriction. If not specified, the
+default is to whitelist everything.
+
+Example configuration:
+```yaml
+federation_domain_whitelist:
+ - lon.example.com
+ - nyc.example.com
+ - syd.example.com
+```
+---
+Config option: `federation_metrics_domains`
+
+Report prometheus metrics on the age of PDUs being sent to and received from
+the given domains. This can be used to give an idea of "delay" on inbound
+and outbound federation, though be aware that any delay can be due to problems
+at either end or with the intermediate network.
+
+By default, no domains are monitored in this way.
+
+Example configuration:
+```yaml
+federation_metrics_domains:
+ - matrix.org
+ - example.com
+```
+---
+Config option: `allow_profile_lookup_over_federation`
+
+Set to false to disable profile lookup over federation. By default, the
+Federation API allows other homeservers to obtain profile data of any user
+on this homeserver.
+
+Example configuration:
+```yaml
+allow_profile_lookup_over_federation: false
+```
+---
+Config option: `allow_device_name_lookup_over_federation`
+
+Set this option to false to disable device display name lookup over federation. By default, the
+Federation API allows other homeservers to obtain device display names of any user
+on this homeserver.
+
+Example configuration:
+```yaml
+allow_device_name_lookup_over_federation: false
+```
+---
+## Caching ##
+
+Options related to caching.
+
+---
+Config option: `event_cache_size`
+
+The number of events to cache in memory. Not affected by
+`caches.global_factor`. Defaults to 10K.
+
+Example configuration:
+```yaml
+event_cache_size: 15K
+```
+---
+Config option: `caches` and associated values
+
+A cache 'factor' is a multiplier that can be applied to each of
+Synapse's caches in order to increase or decrease the maximum
+number of entries that can be stored.
+
+Caching can be configured through the following sub-options:
+
+* `global_factor`: Controls the global cache factor, which is the default cache factor
+ for all caches if a specific factor for that cache is not otherwise
+ set.
+
+ This can also be set by the `SYNAPSE_CACHE_FACTOR` environment
+ variable. Setting by environment variable takes priority over
+ setting through the config file.
+
+ Defaults to 0.5, which will halve the size of all caches.
+
+* `per_cache_factors`: A dictionary of cache name to cache factor for that individual
+ cache. Overrides the global cache factor for a given cache.
+
+ These can also be set through environment variables comprised
+ of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital
+ letters and underscores. Setting by environment variable
+ takes priority over setting through the config file.
+ Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0`
+
+ Some caches have '*' and other characters that are not
+ alphanumeric or underscores. These caches can be named with or
+ without the special characters stripped. For example, to specify
+ the cache factor for `*stateGroupCache*` via an environment
+ variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
+
+* `expire_caches`: Controls whether cache entries are evicted after a specified time
+ period. Defaults to true. Set to false to disable this feature. Note that never expiring
+ caches may result in excessive memory usage.
+
+* `cache_entry_ttl`: If `expire_caches` is enabled, this flag controls how long an entry can
+ be in a cache without having been accessed before being evicted.
+ Defaults to 30m.
+
+* `sync_response_cache_duration`: Controls how long the results of a /sync request are
+ cached for after a successful response is returned. A higher duration can help clients
+ with intermittent connections, at the cost of higher memory usage.
+ By default, this is zero, which means that sync responses are not cached
+ at all.
+
+
+Example configuration:
+```yaml
+caches:
+ global_factor: 1.0
+ per_cache_factors:
+ get_users_who_share_room_with_user: 2.0
+ expire_caches: false
+ sync_response_cache_duration: 2m
+```
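+
+The environment-variable form can be handy for quick experiments without editing
+the config file. A hypothetical sketch (how you start Synapse will depend on your
+deployment):
+
+```shell
+# Override the global factor and one specific cache for this run only.
+export SYNAPSE_CACHE_FACTOR=1.0
+export SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
+synctl restart
+```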
+---
+## Database ##
+Config options related to database settings.
+
+---
+Config option: `database`
+
+The `database` setting defines the database that Synapse uses to store all of
+its data.
+
+Associated sub-options:
+
+* `name`: this option specifies the database engine to use: either `sqlite3` (for SQLite)
+ or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite.
+
+* `txn_limit` gives the maximum number of transactions to run per connection
+ before reconnecting. Defaults to 0, which means no limit.
+
+* `allow_unsafe_locale` is an option specific to Postgres. Under the default behavior, Synapse will refuse to
+ start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended)
+ by setting `allow_unsafe_locale` to true. Note that doing so may corrupt your database. You can find more information
+ [here](../../postgres.md#fixing-incorrect-collate-or-ctype) and [here](https://wiki.postgresql.org/wiki/Locale_data_changes).
+
+* `args` gives options which are passed through to the database engine,
+ except for options starting with `cp_`, which are used to configure the Twisted
+ connection pool. For a reference to valid arguments, see:
+ * for [sqlite](https://docs.python.org/3/library/sqlite3.html#sqlite3.connect)
+ * for [postgres](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
+ * for [the connection pool](https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__)
+
+For more information on using Synapse with Postgres,
+see [here](../../postgres.md).
+
+Example SQLite configuration:
+```yaml
+database:
+ name: sqlite3
+ args:
+ database: /path/to/homeserver.db
+```
+
+Example Postgres configuration:
+```yaml
+database:
+ name: psycopg2
+ txn_limit: 10000
+ args:
+ user: synapse_user
+ password: secretpassword
+ database: synapse
+ host: localhost
+ port: 5432
+ cp_min: 5
+ cp_max: 10
+```
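+
+If you are creating the role and database for the Postgres example above from
+scratch, it might look like the following sketch; the names match the example,
+and the locale settings follow the requirements described in
+[the Postgres documentation](../../postgres.md).
+
+```shell
+# Create the database user (you will be prompted for a password)...
+sudo -u postgres createuser --pwprompt synapse_user
+# ...and a C-locale, UTF8-encoded database owned by that user.
+sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 \
+  --owner=synapse_user synapse
+```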
+---
+## Logging ##
+Config options related to logging.
+
+---
+Config option: `log_config`
+
+This option specifies a YAML Python logging config file, as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+
+Example configuration:
+```yaml
+log_config: "CONFDIR/SERVERNAME.log.config"
+```
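+
+The log config file itself uses the standard Python dictionary-schema logging
+format. A minimal hypothetical sketch of its contents:
+
+```yaml
+version: 1
+formatters:
+  precise:
+    format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+handlers:
+  console:
+    class: logging.StreamHandler
+    formatter: precise
+root:
+  level: INFO
+  handlers: [console]
+```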
+---
+## Ratelimiting ##
+Options related to ratelimiting in Synapse.
+
+Each ratelimiting configuration is made of two parameters:
+ - `per_second`: number of requests a client can send per second.
+ - `burst_count`: number of requests a client can send before being throttled.
+---
+Config option: `rc_message`
+
+Ratelimiting settings for client messaging.
+
+This option ratelimits message sending based on the account the client
+is using. It defaults to: `per_second: 0.2`, `burst_count: 10`.
+
+Example configuration:
+```yaml
+rc_message:
+ per_second: 0.5
+ burst_count: 15
+```
+---
+Config option: `rc_registration`
+
+This option ratelimits registration requests based on the client's IP address.
+It defaults to `per_second: 0.17`, `burst_count: 3`.
+
+Example configuration:
+```yaml
+rc_registration:
+ per_second: 0.15
+ burst_count: 2
+```
+---
+Config option: `rc_registration_token_validity`
+
+This option ratelimits requests that check the validity of registration tokens,
+based on the client's IP address.
+Defaults to `per_second: 0.1`, `burst_count: 5`.
+
+Example configuration:
+```yaml
+rc_registration_token_validity:
+ per_second: 0.3
+ burst_count: 6
+```
+---
+Config option: `rc_login`
+
+This option specifies several limits for login:
+* `address` ratelimits login requests based on the client's IP
+ address. Defaults to `per_second: 0.17`, `burst_count: 3`.
+
+* `account` ratelimits login requests based on the account the
+ client is attempting to log into. Defaults to `per_second: 0.17`,
+ `burst_count: 3`.
+
+* `failed_attempts` ratelimits login requests based on the account the
+ client is attempting to log into, based on the amount of failed login
+ attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`.
+
+Example configuration:
+```yaml
+rc_login:
+ address:
+ per_second: 0.15
+ burst_count: 5
+ account:
+ per_second: 0.18
+ burst_count: 4
+ failed_attempts:
+ per_second: 0.19
+ burst_count: 7
+```
+---
+Config option: `rc_admin_redaction`
+
+This option sets ratelimiting for redactions by room admins. If this is not explicitly
+set then it uses the same ratelimiting as per `rc_message`. This is useful
+to allow room admins to deal with abuse quickly.
+
+Example configuration:
+```yaml
+rc_admin_redaction:
+ per_second: 1
+ burst_count: 50
+```
+---
+Config option: `rc_joins`
+
+This option allows for ratelimiting the number of rooms a user can join. This setting has the following sub-options:
+
+* `local`: ratelimits when users are joining rooms the server is already in.
+ Defaults to `per_second: 0.1`, `burst_count: 10`.
+
+* `remote`: ratelimits when users are trying to join rooms not on the server (which
+ can be more computationally expensive than restricting locally). Defaults to
+ `per_second: 0.01`, `burst_count: 10`.
+
+Example configuration:
+```yaml
+rc_joins:
+ local:
+ per_second: 0.2
+ burst_count: 15
+ remote:
+ per_second: 0.03
+ burst_count: 12
+```
+---
+Config option: `rc_3pid_validation`
+
+This option ratelimits how often a user or IP can attempt to validate a 3PID.
+Defaults to `per_second: 0.003`, `burst_count: 5`.
+
+Example configuration:
+```yaml
+rc_3pid_validation:
+ per_second: 0.003
+ burst_count: 5
+```
+---
+Config option: `rc_invites`
+
+This option ratelimits how often invites can be sent in a room or to a
+specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
+`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
+
+Example configuration:
+```yaml
+rc_invites:
+ per_room:
+ per_second: 0.5
+ burst_count: 5
+ per_user:
+ per_second: 0.004
+ burst_count: 3
+```
+---
+Config option: `rc_third_party_invite`
+
+This option ratelimits 3PID invites (i.e. invites sent to a third-party ID
+such as an email address or a phone number) based on the account that's
+sending the invite. Defaults to `per_second: 0.2`, `burst_count: 10`.
+
+Example configuration:
+```yaml
+rc_third_party_invite:
+ per_second: 0.2
+ burst_count: 10
+```
+---
+Config option: `rc_federation`
+
+Defines limits on federation requests.
+
+The `rc_federation` configuration has the following sub-options:
+* `window_size`: window size in milliseconds. Defaults to 1000.
+* `sleep_limit`: number of federation requests from a single server in
+ a window before the server will delay processing the request. Defaults to 10.
+* `sleep_delay`: duration in milliseconds to delay processing events
+ from remote servers by if they go over the sleep limit. Defaults to 500.
+* `reject_limit`: maximum number of concurrent federation requests
+ allowed from a single server. Defaults to 50.
+* `concurrent`: number of federation requests to concurrently process
+ from a single server. Defaults to 3.
+
+Example configuration:
+```yaml
+rc_federation:
+ window_size: 750
+ sleep_limit: 15
+ sleep_delay: 400
+ reject_limit: 40
+ concurrent: 5
+```
+---
+Config option: `federation_rr_transactions_per_room_per_second`
+
+Sets outgoing federation transaction frequency for sending read-receipts,
+per-room.
+
+If we end up trying to send out more read-receipts, they will get buffered up
+into fewer transactions. Defaults to 50.
+
+Example configuration:
+```yaml
+federation_rr_transactions_per_room_per_second: 40
+```
+---
+## Media Store ##
+Config options relating to the Synapse media store.
+
+---
+Config option: `enable_media_repo`
+
+Enable the media store service in the Synapse master. Defaults to true.
+Set to false if you are using a separate media store worker.
+
+Example configuration:
+```yaml
+enable_media_repo: false
+```
+---
+Config option: `media_store_path`
+
+Directory where uploaded images and attachments are stored.
+
+Example configuration:
+```yaml
+media_store_path: "DATADIR/media_store"
+```
+---
+Config option: `media_storage_providers`
+
+Media storage providers allow media to be stored in different
+locations. Defaults to none. Associated sub-options are:
+* `module`: type of resource, e.g. `file_system`.
+* `store_local`: whether to store newly uploaded local files
+* `store_remote`: whether to store newly downloaded remote files
+* `store_synchronous`: whether to wait for successful storage for local uploads
+* `config`: sets a path to the resource through the `directory` option
+
+Example configuration:
+```yaml
+media_storage_providers:
+ - module: file_system
+ store_local: false
+ store_remote: false
+ store_synchronous: false
+ config:
+ directory: /mnt/some/other/directory
+```
+---
+Config option: `max_upload_size`
+
+The largest allowed upload size in bytes.
+
+If you are using a reverse proxy you may also need to set this value in
+your reverse proxy's config. Defaults to 50M. Notably, Nginx has a small max body size by default.
+See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse.
+
+Example configuration:
+```yaml
+max_upload_size: 60M
+```
+---
+Config option: `max_image_pixels`
+
+Maximum number of pixels that will be thumbnailed. Defaults to 32M.
+
+Example configuration:
+```yaml
+max_image_pixels: 35M
+```
+---
+Config option: `dynamic_thumbnails`
+
+Whether to generate new thumbnails on the fly to precisely match
+the resolution requested by the client. If true then whenever
+a new resolution is requested by the client the server will
+generate a new thumbnail. If false the server will pick a thumbnail
+from a precalculated list. Defaults to false.
+
+Example configuration:
+```yaml
+dynamic_thumbnails: true
+```
+---
+Config option: `thumbnail_sizes`
+
+List of thumbnails to precalculate when an image is uploaded. Associated sub-options are:
+* `width`
+* `height`
+* `method`: e.g. `crop` or `scale`.
+
+Example configuration:
+```yaml
+thumbnail_sizes:
+ - width: 32
+ height: 32
+ method: crop
+ - width: 96
+ height: 96
+ method: crop
+ - width: 320
+ height: 240
+ method: scale
+ - width: 640
+ height: 480
+ method: scale
+ - width: 800
+ height: 600
+ method: scale
+```
+---
+Config option: `url_preview_enabled`
+
+This setting determines whether the URL preview API is enabled.
+It is disabled by default. Set to true to enable. If enabled, you must specify a
+`url_preview_ip_range_blacklist` blacklist.
+
+Example configuration:
+```yaml
+url_preview_enabled: true
+```
+---
+Config option: `url_preview_ip_range_blacklist`
+
+List of IP address CIDR ranges that the URL preview spider is denied
+from accessing. There are no defaults: you must explicitly
+specify a list for URL previewing to work. You should specify any
+internal services in your network that you do not want Synapse to try
+to connect to, otherwise anyone in any Matrix room could cause your
+Synapse to issue arbitrary GET requests to your internal services,
+causing serious security issues.
+
+(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+listed here, since they correspond to unroutable addresses.)
+
+This must be specified if `url_preview_enabled` is set. It is recommended that
+you use the following example list as a starting point.
+
+Note: The value is ignored when an HTTP proxy is in use.
+
+Example configuration:
+```yaml
+url_preview_ip_range_blacklist:
+ - '127.0.0.0/8'
+ - '10.0.0.0/8'
+ - '172.16.0.0/12'
+ - '192.168.0.0/16'
+ - '100.64.0.0/10'
+ - '192.0.0.0/24'
+ - '169.254.0.0/16'
+ - '192.88.99.0/24'
+ - '198.18.0.0/15'
+ - '192.0.2.0/24'
+ - '198.51.100.0/24'
+ - '203.0.113.0/24'
+ - '224.0.0.0/4'
+ - '::1/128'
+ - 'fe80::/10'
+ - 'fc00::/7'
+ - '2001:db8::/32'
+ - 'ff00::/8'
+ - 'fec0::/10'
+```
+---
+Config option: `url_preview_ip_range_whitelist`
+
+This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
+to access even if they are specified in `url_preview_ip_range_blacklist`.
+This is useful for specifying exceptions to wide-ranging blacklisted
+target IP ranges - e.g. for enabling URL previews for a specific private
+website only visible in your network. Defaults to none.
+
+Example configuration:
+```yaml
+url_preview_ip_range_whitelist:
+ - '192.168.1.1'
+```
+---
+Config option: `url_preview_url_blacklist`
+
+Optional list of URL matches that the URL preview spider is
+denied from accessing. You should use `url_preview_ip_range_blacklist`
+in preference to this, otherwise someone could define a public DNS
+entry that points to a private IP address and circumvent the blacklist.
+This is more useful if you know there is an entire shape of URL that
+you never want Synapse to try to spider.
+
+Each list entry is a dictionary of url component attributes as returned
+by urlparse.urlsplit as applied to the absolute form of the URL. See
+[here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit) for more
+information. Some examples are:
+
+* `username`
+* `netloc`
+* `scheme`
+* `path`
+
+The values of the dictionary are treated as a filename match pattern
+applied to that component of URLs, unless they start with a `^`, in which
+case they are treated as a regular expression match. If all the
+specified component matches for a given list item succeed, the URL is
+blacklisted.
+
+Example configuration:
+```yaml
+url_preview_url_blacklist:
+ # blacklist any URL with a username in its URI
+ - username: '*'
+
+ # blacklist all *.google.com URLs
+ - netloc: 'google.com'
+ - netloc: '*.google.com'
+
+ # blacklist all plain HTTP URLs
+ - scheme: 'http'
+
+ # blacklist http(s)://www.acme.com/foo
+ - netloc: 'www.acme.com'
+ path: '/foo'
+
+ # blacklist any URL with a literal IPv4 address
+ - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
+```
+---
+Config option: `max_spider_size`
+
+The largest allowed URL preview spidering size in bytes. Defaults to 10M.
+
+Example configuration:
+```yaml
+max_spider_size: 8M
+```
+---
+Config option: `url_preview_accept_language`
+
+A list of values for the Accept-Language HTTP header used when
+downloading webpages during URL preview generation. This allows
+Synapse to specify the preferred languages that URL previews should
+be in when communicating with remote servers.
+
+Each value is an IETF language tag; a 2-3 letter identifier for a
+language, optionally followed by subtags separated by '-', specifying
+a country or region variant.
+
+Multiple values can be provided, and a weight can be added to each by
+using quality value syntax (;q=). '*' translates to any language.
+
+Defaults to "en".
+
+Example configuration:
+```yaml
+url_preview_accept_language:
+  - en-UK
+  - en-US;q=0.9
+  - fr;q=0.8
+  - '*;q=0.7'
+```
+---
+Config option: `oembed`
+
+oEmbed allows for easier embedding of content from a website. It can be
+used for generating URL previews of services which support it. A default list of oEmbed providers
+is included with Synapse. Set `disable_default_providers` to true to disable using
+these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each
+should be in the form of providers.json). By default this list is empty.
+
+Example configuration:
+```yaml
+oembed:
+ disable_default_providers: true
+ additional_providers:
+ - oembed/my_providers.json
+```
+---
+## Captcha ##
+
+See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
+
+---
+Config option: `recaptcha_public_key`
+
+This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is
+enabled.
+
+Example configuration:
+```yaml
+recaptcha_public_key: "YOUR_PUBLIC_KEY"
+```
+---
+Config option: `recaptcha_private_key`
+
+This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is
+enabled.
+
+Example configuration:
+```yaml
+recaptcha_private_key: "YOUR_PRIVATE_KEY"
+```
+---
+Config option: `enable_registration_captcha`
+
+Set to true to enable ReCAPTCHA checks when registering, preventing signup
+unless a captcha is answered. Requires a valid ReCAPTCHA public/private key pair.
+Defaults to false.
+
+Example configuration:
+```yaml
+enable_registration_captcha: true
+```
+---
+Config option: `recaptcha_siteverify_api`
+
+The API endpoint to use for verifying `m.login.recaptcha` responses.
+Defaults to `https://www.recaptcha.net/recaptcha/api/siteverify`.
+
+Example configuration:
+```yaml
+recaptcha_siteverify_api: "https://my.recaptcha.site"
+```
+---
+## TURN ##
+Options related to adding a TURN server to Synapse.
+
+---
+Config option: `turn_uris`
+
+The public URIs of the TURN server to give to clients.
+
+Example configuration:
+```yaml
+turn_uris: [turn:example.org]
+```
+---
+Config option: `turn_shared_secret`
+
+The shared secret used to compute passwords for the TURN server.
+
+Example configuration:
+```yaml
+turn_shared_secret: "YOUR_SHARED_SECRET"
+```
+---
+Config options: `turn_username` and `turn_password`
+
+The username and password if the TURN server needs them and does not use a token.
+
+Example configuration:
+```yaml
+turn_username: "TURNSERVER_USERNAME"
+turn_password: "TURNSERVER_PASSWORD"
+```
+---
+Config option: `turn_user_lifetime`
+
+How long generated TURN credentials last. Defaults to 1h.
+
+Example configuration:
+```yaml
+turn_user_lifetime: 2h
+```
+---
+Config option: `turn_allow_guests`
+
+Whether guests should be allowed to use the TURN server. This defaults to true; otherwise,
+VoIP will be unreliable for guests. However, it does introduce a slight security risk, as
+it allows users to connect to arbitrary endpoints without having first signed up for a valid account (e.g. by passing a CAPTCHA).
+
+Example configuration:
+```yaml
+turn_allow_guests: false
+```
+---
+## Registration ##
+
+Registration can be rate-limited using the parameters in the [Ratelimiting](#ratelimiting) section of this manual.
+
+---
+Config option: `enable_registration`
+
+Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration,
+you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
+without any verification, you must also set `enable_registration_without_verification` to true.
+
+Example configuration:
+```yaml
+enable_registration: true
+```
+---
+Config option: `enable_registration_without_verification`
+
+Enable registration without email or captcha verification. Note: this option is *not* recommended,
+as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
+unless `enable_registration` is also enabled.
+
+Example configuration:
+```yaml
+enable_registration_without_verification: true
+```
+---
+Config option: `session_lifetime`
+
+Time that a user's session remains valid for, after they log in.
+
+Note that this is not currently compatible with guest logins.
+
+Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
+logged in.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+session_lifetime: 24h
+```
+---
+Config option: `refreshable_access_token_lifetime`
+
+Time that an access token remains valid for, if the session is using refresh tokens.
+
+For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
+
+Note that this only applies to clients which advertise support for refresh tokens.
+
+Note also that this is calculated at login time and refresh time: changes are not applied to
+existing sessions until they are refreshed.
+
+By default, this is 5 minutes.
+
+Example configuration:
+```yaml
+refreshable_access_token_lifetime: 10m
+```
+---
+Config option: `refresh_token_lifetime: 24h`
+
+Time that a refresh token remains valid for (provided that it is not
+exchanged for another one first).
+This option can be used to automatically log out inactive sessions.
+Please see the [manual](user_authentication/refresh_tokens.md) for more information.
+
+Note also that this is calculated at login time and refresh time:
+changes are not applied to existing sessions until they are refreshed.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+refresh_token_lifetime: 24h
+```
+---
+Config option: `nonrefreshable_access_token_lifetime`
+
+Time that an access token remains valid for, if the session is NOT
+using refresh tokens.
+
+Please note that not all clients support refresh tokens, so setting
+this to a short value may be inconvenient for some users who will
+then be logged out frequently.
+
+Note also that this is calculated at login time: changes are not applied
+retrospectively to existing sessions for users that have already logged in.
+
+By default, this is infinite.
+
+Example configuration:
+```yaml
+nonrefreshable_access_token_lifetime: 24h
+```
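+
+These lifetime options interact; as an illustrative (not prescriptive) sketch, the
+following expires sessions after a week, rotates refreshable access tokens every ten
+minutes, and logs out refresh-token sessions that go unused for a day:
+```yaml
+session_lifetime: 7d
+refreshable_access_token_lifetime: 10m
+refresh_token_lifetime: 24h
+nonrefreshable_access_token_lifetime: 24h
+```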
+---
+Config option: `registrations_require_3pid`
+
+If this is set, the user must provide all of the specified types of 3PID when registering.
+
+Example configuration:
+```yaml
+registrations_require_3pid:
+ - email
+ - msisdn
+```
+---
+Config option: `disable_msisdn_registration`
+
+Explicitly disable asking for MSISDNs from the registration
+flow (overrides `registrations_require_3pid` if MSISDNs are set as required).
+
+Example configuration:
+```yaml
+disable_msisdn_registration: true
+```
+---
+Config option: `allowed_local_3pids`
+
+Mandate that users are only allowed to associate certain formats of
+3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options.
+
+Example configuration:
+```yaml
+allowed_local_3pids:
+ - medium: email
+ pattern: '^[^@]+@matrix\.org$'
+ - medium: email
+ pattern: '^[^@]+@vector\.im$'
+ - medium: msisdn
+ pattern: '\+44'
+```
+---
+Config option: `enable_3pid_lookup`
+
+Enable 3PIDs lookup requests to identity servers from this server. Defaults to true.
+
+Example configuration:
+```yaml
+enable_3pid_lookup: false
+```
+---
+Config option: `registration_requires_token`
+
+Require users to submit a token during registration.
+Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md).
+Note that `enable_registration` must be set to true.
+Disabling this option will not delete any tokens previously generated.
+Defaults to false. Set to true to enable.
+
+Example configuration:
+```yaml
+registration_requires_token: true
+```
+---
+Config option: `registration_shared_secret`
+
+If set, allows registration of standard or admin accounts by anyone who
+has the shared secret, even if registration is otherwise disabled. Such
+registrations are usually performed with the `register_new_matrix_user`
+script that ships with Synapse.
+
+Example configuration:
+```yaml
+registration_shared_secret: <PRIVATE STRING>
+```
+---
+Config option: `bcrypt_rounds`
+
+Set the number of bcrypt rounds used to generate password hashes.
+Larger numbers increase the work factor needed to generate the hash.
+The default number is 12 (which equates to 2^12 rounds).
+N.B. that increasing this will exponentially increase the time required
+to register or login - e.g. 24 => 2^24 rounds, which will take >20 mins.
+
+Example configuration:
+```yaml
+bcrypt_rounds: 14
+```
+---
+Config option: `allow_guest_access`
+
+Allows users to register as guests without a password/email/etc, and
+participate in rooms hosted on this server which have been made
+accessible to anonymous users. Defaults to false.
+
+Example configuration:
+```yaml
+allow_guest_access: true
+```
+---
+Config option: `default_identity_server`
+
+The identity server which we suggest that clients should use when users log
+in on this server.
+
+(By default, no suggestion is made, so it is left up to the client.
+This setting is ignored unless `public_baseurl` is also explicitly set.)
+
+Example configuration:
+```yaml
+default_identity_server: https://matrix.org
+```
+---
+Config option: `account_threepid_delegates`
+
+Handle threepid (email/phone etc) registration and password resets through a set of
+*trusted* identity servers. Note that this allows the configured identity server to
+reset passwords for accounts!
+
+Be aware that if `email` is not set, and SMTP options have not been
+configured in the email config block, registration and user password resets via
+email will be globally disabled.
+
+Additionally, if `msisdn` is not set, registration and password resets via msisdn
+will be disabled regardless, and users will not be able to associate an msisdn
+identifier to their account. This is due to Synapse currently not supporting
+any method of sending SMS messages on its own.
+
+To enable using an identity server for operations regarding a particular third-party
+identifier type, set the value to the URL of that identity server as shown in the
+examples below.
+
+Servers handling these requests must answer the `/requestToken` endpoints defined
+by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest).
+
+Example configuration:
+```yaml
+account_threepid_delegates:
+ email: https://example.com # Delegate email sending to example.com
+ msisdn: http://localhost:8090 # Delegate SMS sending to this local process
+```
+---
+Config option: `enable_set_displayname`
+
+Whether users are allowed to change their displayname after it has
+been initially set. Useful when provisioning users based on the
+contents of a third-party directory.
+
+Does not apply to server administrators. Defaults to true.
+
+Example configuration:
+```yaml
+enable_set_displayname: false
+```
+---
+Config option: `enable_set_avatar_url`
+
+Whether users are allowed to change their avatar after it has been
+initially set. Useful when provisioning users based on the contents
+of a third-party directory.
+
+Does not apply to server administrators. Defaults to true.
+
+Example configuration:
+```yaml
+enable_set_avatar_url: false
+```
+---
+Config option: `enable_3pid_changes`
+
+Whether users can change the third-party IDs associated with their accounts
+(email address and msisdn).
+
+Defaults to true.
+
+Example configuration:
+```yaml
+enable_3pid_changes: false
+```
+---
+Config option: `auto_join_rooms`
+
+Users who register on this homeserver will automatically be joined
+to the rooms listed under this option.
+
+By default, any room aliases included in this list will be created
+as a publicly joinable room when the first user registers for the
+homeserver. If the room already exists, make certain it is a publicly joinable
+room, i.e. the join rule of the room must be set to 'public'. You can find more options
+relating to auto-joining rooms below.
+
+Example configuration:
+```yaml
+auto_join_rooms:
+ - "#exampleroom:example.com"
+ - "#anotherexampleroom:example.com"
+```
+---
+Config option: `autocreate_auto_join_rooms`
+
+Where `auto_join_rooms` are specified, setting this flag ensures that
+the rooms exist by creating them when the first user on the
+homeserver registers.
+
+By default the auto-created rooms are publicly joinable from any federated
+server. Use the `autocreate_auto_join_rooms_federated` and
+`autocreate_auto_join_room_preset` settings to customise this behaviour.
+
+Setting to false means that if the rooms are not manually created,
+users cannot be auto-joined since they do not exist.
+
+Defaults to true.
+
+Example configuration:
+```yaml
+autocreate_auto_join_rooms: false
+```
+---
+Config option: `autocreate_auto_join_rooms_federated`
+
+Whether the rooms listed in `auto_join_rooms` that are auto-created are available
+via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
+
+Note that whether a room is federated cannot be modified after
+creation.
+
+Defaults to true: the room will be joinable from other servers.
+Set to false to prevent users from other homeservers from
+joining these rooms.
+
+Example configuration:
+```yaml
+autocreate_auto_join_rooms_federated: false
+```
+---
+Config option: `autocreate_auto_join_room_preset`
+
+The room preset to use when auto-creating one of `auto_join_rooms`. Only has an
+effect if `autocreate_auto_join_rooms` is true.
+
+Possible values for this option are:
+* "public_chat": the room is joinable by anyone, including
+ federated servers if `autocreate_auto_join_rooms_federated` is true (the default).
+* "private_chat": an invitation is required to join these rooms.
+* "trusted_private_chat": an invitation is required to join this room and the invitee is
+ assigned a power level of 100 upon joining the room.
+
+If a value of "private_chat" or "trusted_private_chat" is used then
+`auto_join_mxid_localpart` must also be configured.
+
+Defaults to "public_chat".
+
+Example configuration:
+```yaml
+autocreate_auto_join_room_preset: private_chat
+```
+---
+Config option: `auto_join_mxid_localpart`
+
+The local part of the user id which is used to create `auto_join_rooms` if
+`autocreate_auto_join_rooms` is true. If this is not provided then the
+initial user account that registers will be used to create the rooms.
+
+The user id is also used to invite new users to any auto-join rooms which
+are set to invite-only.
+
+It *must* be configured if `autocreate_auto_join_room_preset` is set to
+"private_chat" or "trusted_private_chat".
+
+Note that this must be specified in order for new users to be correctly
+invited to any auto-join rooms which have been set to invite-only (either
+at the time of creation or subsequently).
+
+Note that, if the room already exists, this user must be joined and
+have the appropriate permissions to invite new members.
+
+Example configuration:
+```yaml
+auto_join_mxid_localpart: system
+```
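+
+As a worked example, the following sketch auto-joins new users to an invite-only,
+non-federated room created and managed by a dedicated system user (the alias and
+localpart are placeholders):
+```yaml
+auto_join_rooms:
+  - "#welcome:example.com"
+autocreate_auto_join_rooms: true
+autocreate_auto_join_rooms_federated: false
+autocreate_auto_join_room_preset: private_chat
+auto_join_mxid_localpart: system
+```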
+---
+Config option: `auto_join_rooms_for_guests`
+
+When `auto_join_rooms` is specified, setting this flag to false prevents
+guest accounts from being automatically joined to the rooms.
+
+Defaults to true.
+
+Example configuration:
+```yaml
+auto_join_rooms_for_guests: false
+```
+---
+Config option: `inhibit_user_in_use_error`
+
+Whether to inhibit errors raised when registering a new account if the user ID
+already exists. If turned on, requests to `/register/available` will always
+show a user ID as available, and Synapse won't raise an error when starting
+a registration with a user ID that already exists. However, Synapse will still
+raise an error if the registration completes and the username conflicts.
+
+Defaults to false.
+
+Example configuration:
+```yaml
+inhibit_user_in_use_error: true
+```
+---
+## Metrics ##
+Config options related to metrics.
+
+---
+Config option: `enable_metrics`
+
+Set to true to enable collection and rendering of performance metrics.
+Defaults to false.
+
+Example configuration:
+```yaml
+enable_metrics: true
+```
+---
+Config option: `sentry`
+
+Use this option to enable Sentry integration. Provide the DSN assigned to you by Sentry
+with the `dsn` setting.
+
+NOTE: While attempts are made to ensure that the logs don't contain
+any sensitive information, this cannot be guaranteed. By enabling
+this option the Sentry server may therefore receive sensitive
+information, and it in turn may then disseminate sensitive information
+through insecure notification channels if so configured.
+
+Example configuration:
+```yaml
+sentry:
+ dsn: "..."
+```
+---
+Config option: `metrics_flags`
+
+Flags to enable Prometheus metrics which are not suitable to be
+enabled by default, either for performance reasons or limited use.
+Currently the only option is `known_servers`, which publishes
+`synapse_federation_known_servers`, a gauge of the number of
+servers this homeserver knows about, including itself. May cause
+performance problems on large homeservers.
+
+Example configuration:
+```yaml
+metrics_flags:
+ known_servers: true
+```
+---
+Config option: `report_stats`
+
+Whether or not to report anonymized homeserver usage statistics. This is originally
+set when generating the config. Set this option to true or false to change the current
+behavior.
+
+Example configuration:
+```yaml
+report_stats: true
+```
+---
+Config option: `report_stats_endpoint`
+
+The endpoint to report the anonymized homeserver usage statistics to.
+Defaults to `https://matrix.org/report-usage-stats/push`.
+
+Example configuration:
+```yaml
+report_stats_endpoint: https://example.com/report-usage-stats/push
+```
+---
+## API Configuration ##
+Config settings related to the client/server API.
+
+---
+Config option: `room_prejoin_state`
+
+Controls for the state that is shared with users who receive an invite
+to a room. By default, the following state event types are shared with users who
+receive invites to the room:
+- m.room.join_rules
+- m.room.canonical_alias
+- m.room.avatar
+- m.room.encryption
+- m.room.name
+- m.room.create
+- m.room.topic
+
+To change the default behavior, use the following sub-options:
+* `disable_default_event_types`: set to true to disable the above defaults. If this
+ is enabled, only the event types listed in `additional_event_types` are shared.
+ Defaults to false.
+* `additional_event_types`: Additional state event types to share with users when they are invited
+ to a room. By default, this list is empty (so only the default event types are shared).
+
+Example configuration:
+```yaml
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - org.example.custom.event.type
+ - m.room.join_rules
+```
+---
+Config option: `track_puppeted_user_ips`
+
+We record the IP address of clients used to access the API for various
+reasons, including displaying it to the user in the "Where you're signed in"
+dialog.
+
+By default, when puppeting another user via the admin API, the client IP
+address is recorded against the user who created the access token (ie, the
+admin user), and *not* the puppeted user.
+
+Set this option to true to also record the IP address against the puppeted
+user. (This also means that the puppeted user will count as an "active" user
+for the purpose of monthly active user tracking - see `limit_usage_by_mau` etc
+above.)
+
+Example configuration:
+```yaml
+track_puppeted_user_ips: true
+```
+---
+Config option: `app_service_config_files`
+
+A list of application service config files to use.
+
+Example configuration:
+```yaml
+app_service_config_files:
+ - app_service_1.yaml
+ - app_service_2.yaml
+```
+---
+Config option: `track_appservice_user_ips`
+
+Defaults to false. Set to true to enable tracking of application service IP addresses.
+Implicitly enables MAU tracking for application service users.
+
+Example configuration:
+```yaml
+track_appservice_user_ips: true
+```
+---
+Config option: `macaroon_secret_key`
+
+A secret which is used to sign access tokens. If none is specified,
+the `registration_shared_secret` is used, if one is given; otherwise,
+a secret key is derived from the signing key.
+
+Example configuration:
+```yaml
+macaroon_secret_key: <PRIVATE STRING>
+```
+---
+Config option: `form_secret`
+
+A secret which is used to calculate HMACs for form values, to stop
+falsification of values. Must be specified for the User Consent
+forms to work.
+
+Example configuration:
+```yaml
+form_secret: <PRIVATE STRING>
+```
+---
+## Signing Keys ##
+Config options relating to signing keys.
+
+---
+Config option: `signing_key_path`
+
+Path to the signing key to sign messages with.
+
+Example configuration:
+```yaml
+signing_key_path: "CONFDIR/SERVERNAME.signing.key"
+```
+---
+Config option: `old_signing_keys`
+
+The keys that the server used to sign messages with but won't use
+to sign new messages. For each key, `key` should be the base64-encoded public key, and
+`expired_ts` should be the time (in milliseconds since the unix epoch) that
+it was last used.
+
+It is possible to build an entry from an old `signing.key` file using the
+`export_signing_key` script which is provided with Synapse.
+
+Example configuration:
+```yaml
+old_signing_keys:
+ "ed25519:id": { key: "base64string", expired_ts: 123456789123 }
+```
+---
+Config option: `key_refresh_interval`
+
+How long a key response published by this server is valid for.
+Used to set the `valid_until_ts` in `/key/v2` APIs.
+Determines how quickly servers will query to check which keys
+are still valid. Defaults to 1d.
+
+Example configuration:
+```yaml
+key_refresh_interval: 2d
+```
+---
+Config option: `trusted_key_servers`
+
+The trusted servers to download signing keys from.
+
+When we need to fetch a signing key, each server is tried in parallel.
+
+Normally, the connection to the key server is validated via TLS certificates.
+Additional security can be provided by configuring `verify_keys`, which
+will make Synapse check that the response is signed by those keys.
+
+This setting supersedes an older setting named `perspectives`. The old format
+is still supported for backwards-compatibility, but it is deprecated.
+
+`trusted_key_servers` defaults to matrix.org, but using it will generate a
+warning on start-up. To suppress this warning, set
+`suppress_key_server_warning` to true.
+
+Options for each entry in the list include:
+* `server_name`: the name of the server. Required.
+* `verify_keys`: an optional map from key id to base64-encoded public key.
+ If specified, we will check that the response is signed by at least
+ one of the given keys.
+* `accept_keys_insecurely`: a boolean. Normally, if `verify_keys` is unset,
+  and `federation_verify_certificates` is not `true`, Synapse will refuse
+ to start, because this would allow anyone who can spoof DNS responses
+ to masquerade as the trusted key server. If you know what you are doing
+ and are sure that your network environment provides a secure connection
+ to the key server, you can set this to `true` to override this behaviour.
+
+Example configuration #1:
+```yaml
+trusted_key_servers:
+ - server_name: "my_trusted_server.example.com"
+ verify_keys:
+ "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
+ - server_name: "my_other_trusted_server.example.com"
+```
+Example configuration #2:
+```yaml
+trusted_key_servers:
+ - server_name: "matrix.org"
+```
+---
+Config option: `suppress_key_server_warning`
+
+Set this option to true to disable the warning that is emitted when
+`trusted_key_servers` includes 'matrix.org'. See above.
+
+Example configuration:
+```yaml
+suppress_key_server_warning: true
+```
+---
+Config option: `key_server_signing_keys_path`
+
+The signing keys to use when acting as a trusted key server. If not specified,
+this defaults to the server signing key.
+
+Can contain multiple keys, one per line.
+
+Example configuration:
+```yaml
+key_server_signing_keys_path: "key_server_signing_keys.key"
+```
+---
+## Single sign-on integration ##
+
+The following settings can be used to make Synapse use a single sign-on
+provider for authentication, instead of its internal password database.
+
+You will probably also want to set the following options to false to
+disable the regular login/registration flows:
+ * `enable_registration`
+ * `password_config.enabled`
+
+You will also want to investigate the settings under the "sso" configuration
+section below.
+
+---
+Config option: `saml2_config`
+
+Enable SAML2 for registration and login. Uses pysaml2. To learn more about pysaml2 and
+to find a full list of options for configuring it, read the docs [here](https://pysaml2.readthedocs.io/en/latest/).
+
+At least one of `sp_config` or `config_path` must be set in this section to
+enable SAML login. You can either put your entire pysaml2 config inline using the `sp_config`
+option, or you can specify a path to a pysaml2 config file with the sub-option `config_path`.
+This setting has the following sub-options:
+
+* `sp_config`: the configuration for the pysaml2 Service Provider. See the pysaml2 docs for the format of this config.
+ Default values will be used for the `entityid` and `service` settings,
+ so it is not normally necessary to specify them unless you need to
+ override them. Here are a few useful sub-options for configuring pysaml:
+ * `metadata`: Point this to the IdP's metadata. You must provide either a local
+ file via the `local` attribute or (preferably) a URL via the
+ `remote` attribute.
+  * `accepted_time_diff`: Allowed clock difference in seconds between the homeserver and IdP.
+ Defaults to 0.
+ * `service`: By default, the user has to go to our login page first. If you'd like
+ to allow IdP-initiated login, set `allow_unsolicited` to true under `sp` in the `service`
+ section.
+* `config_path`: specify a separate pysaml2 configuration file as follows:
+ `config_path: "CONFDIR/sp_conf.py"`
+* `saml_session_lifetime`: The lifetime of a SAML session. This defines how long a user has to
+ complete the authentication process, if `allow_unsolicited` is unset. The default is 15 minutes.
+* `user_mapping_provider`: Using this option, an external module can be provided as a
+  custom solution to mapping attributes returned from a SAML provider onto a Matrix user. The
+ `user_mapping_provider` has the following attributes:
+ * `module`: The custom module's class.
+ * `config`: Custom configuration values for the module. Use the values provided in the
+ example if you are using the built-in user_mapping_provider, or provide your own
+ config values for a custom class if you are using one. This section will be passed as a Python
+ dictionary to the module's `parse_config` method. The built-in provider takes the following two
+ options:
+    * `mxid_source_attribute`: The SAML attribute (after mapping via the attribute maps) to use
+      to derive the Matrix ID from. It is 'uid' by default. Note: This used to be configured by the
+      `saml2_config.mxid_source_attribute` option. If that is still defined, its value will be used instead.
+    * `mxid_mapping`: The mapping system to use for mapping the SAML attribute onto a
+      Matrix ID. Options include: `hexencode` (which maps unpermitted characters to '=xx')
+      and `dotreplace` (which replaces unpermitted characters with '.').
+      The default is `hexencode`. Note: This used to be configured by the
+      `saml2_config.mxid_mapping` option. If that is still defined, its value will be used instead.
+* `grandfathered_mxid_source_attribute`: In previous versions of Synapse, the mapping from SAML attribute to
+  MXID was always calculated dynamically rather than stored in a table. For backwards-compatibility, we will look for `user_ids`
+  matching such a pattern before creating a new account. This setting controls the SAML attribute which will be used for this
+  backwards-compatibility lookup. Typically it should be 'uid', but if the attribute maps are changed, it may be necessary to change it.
+  The default is 'uid'.
+* `attribute_requirements`: It is possible to configure Synapse to only allow logins if SAML attributes
+ match particular values. The requirements can be listed under
+ `attribute_requirements` as shown in the example. All of the listed attributes must
+ match for the login to be permitted.
+* `idp_entityid`: If the metadata XML contains multiple IdP entities then the `idp_entityid`
+ option must be set to the entity to redirect users to.
+ Most deployments only have a single IdP entity and so should omit this option.
+
+
+Once SAML support is enabled, a metadata file will be exposed at
+`https://<server>:<port>/_synapse/client/saml2/metadata.xml`, which you may be able to
+use to configure your SAML IdP with. Alternatively, you can manually configure
+the IdP to use an ACS location of
+`https://<server>:<port>/_synapse/client/saml2/authn_response`.
+
+Example configuration:
+```yaml
+saml2_config:
+ sp_config:
+ metadata:
+ local: ["saml2/idp.xml"]
+ remote:
+ - url: https://our_idp/metadata.xml
+ accepted_time_diff: 3
+
+ service:
+ sp:
+ allow_unsolicited: true
+
+ # The examples below are just used to generate our metadata xml, and you
+ # may well not need them, depending on your setup. Alternatively you
+ # may need a whole lot more detail - see the pysaml2 docs!
+ description: ["My awesome SP", "en"]
+ name: ["Test SP", "en"]
+
+ ui_info:
+ display_name:
+ - lang: en
+ text: "Display Name is the descriptive name of your service."
+ description:
+ - lang: en
+ text: "Description should be a short paragraph explaining the purpose of the service."
+ information_url:
+ - lang: en
+ text: "https://example.com/terms-of-service"
+ privacy_statement_url:
+ - lang: en
+ text: "https://example.com/privacy-policy"
+ keywords:
+ - lang: en
+ text: ["Matrix", "Element"]
+ logo:
+ - lang: en
+ text: "https://example.com/logo.svg"
+ width: "200"
+ height: "80"
+
+ organization:
+ name: Example com
+ display_name:
+ - ["Example co", "en"]
+ url: "http://example.com"
+
+ contact_person:
+ - given_name: Bob
+ sur_name: "the Sysadmin"
+      email_address: ["admin@example.com"]
+      contact_type: technical
+
+ saml_session_lifetime: 5m
+
+ user_mapping_provider:
+ # Below options are intended for the built-in provider, they should be
+ # changed if using a custom module.
+ config:
+ mxid_source_attribute: displayName
+ mxid_mapping: dotreplace
+
+ grandfathered_mxid_source_attribute: upn
+
+ attribute_requirements:
+ - attribute: userGroup
+ value: "staff"
+ - attribute: department
+ value: "sales"
+
+ idp_entityid: 'https://our_idp/entityid'
+```
+---
+Config option: `oidc_providers`
+
+List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
+and login. See [here](../../openid.md)
+for information on how to configure these options.
+
+For backwards compatibility, it is also possible to configure a single OIDC
+provider via an `oidc_config` setting. This is now deprecated and admins are
+advised to migrate to the `oidc_providers` format. (When doing that migration,
+use `oidc` for the `idp_id` to ensure that existing users continue to be
+recognised.)
+
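+For instance, when migrating from a legacy `oidc_config` block, a minimal migrated
+entry might look like this sketch (the issuer and client details are placeholders;
+note the `idp_id` of `oidc`):
+```yaml
+oidc_providers:
+  - idp_id: oidc
+    idp_name: "My OpenID provider"
+    issuer: "https://accounts.example.com/"
+    client_id: "provided-by-your-issuer"
+    client_secret: "provided-by-your-issuer"
+```
+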
+Options for each entry include:
+* `idp_id`: a unique identifier for this identity provider. Used internally
+ by Synapse; should be a single word such as 'github'.
+ Note that, if this is changed, users authenticating via that provider
+ will no longer be recognised as the same user!
+ (Use "oidc" here if you are migrating from an old `oidc_config` configuration.)
+
+* `idp_name`: A user-facing name for this identity provider, which is used to
+ offer the user a choice of login mechanisms.
+
+* `idp_icon`: An optional icon for this identity provider, which is presented
+  by clients and Synapse's own IdP picker page. If given, must be an
+  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
+  obtain such an MXC URI is to upload an image to an (unencrypted) room
+  and then copy the "url" from the source of the event.)
+
+* `idp_brand`: An optional brand for this identity provider, allowing clients
+ to style the login flow according to the identity provider in question.
+ See the [spec](https://spec.matrix.org/latest/) for possible options here.
+
+* `discover`: set to false to disable the use of the OIDC discovery mechanism
+ to discover endpoints. Defaults to true.
+
+* `issuer`: Required. The OIDC issuer. Used to validate tokens and (if discovery
+ is enabled) to discover the provider's endpoints.
+
+* `client_id`: Required. oauth2 client id to use.
+
+* `client_secret`: oauth2 client secret to use. May be omitted if
+ `client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
+
+* `client_secret_jwt_key`: Alternative to client_secret: details of a key used
+ to create a JSON Web Token to be used as an OAuth2 client secret. If
+ given, must be a dictionary with the following properties:
+
+  * `key`: a PEM-encoded signing key. Must be a suitable key for the
+    algorithm specified. Required unless `key_file` is given.
+
+  * `key_file`: the path to a file containing a PEM-encoded signing key.
+    Required unless `key` is given.
+
+ * `jwt_header`: a dictionary giving properties to include in the JWT
+ header. Must include the key `alg`, giving the algorithm used to
+ sign the JWT, such as "ES256", using the JWA identifiers in
+ RFC7518.
+
+ * `jwt_payload`: an optional dictionary giving properties to include in
+ the JWT payload. Normally this should include an `iss` key.
+
+* `client_auth_method`: auth method to use when exchanging the token. Valid
+ values are `client_secret_basic` (default), `client_secret_post` and
+ `none`.
+
+* `scopes`: list of scopes to request. This should normally include the "openid"
+ scope. Defaults to ["openid"].
+
+* `authorization_endpoint`: the oauth2 authorization endpoint. Required if
+ provider discovery is disabled.
+
+* `token_endpoint`: the oauth2 token endpoint. Required if provider discovery is
+ disabled.
+
+* `userinfo_endpoint`: the OIDC userinfo endpoint. Required if discovery is
+ disabled and the 'openid' scope is not requested.
+
+* `jwks_uri`: URI where to fetch the JWKS. Required if discovery is disabled and
+ the 'openid' scope is used.
+
+* `skip_verification`: set to 'true' to skip metadata verification. Use this if
+ you are connecting to a provider that is not OpenID Connect compliant.
+ Defaults to false. Avoid this in production.
+
+* `user_profile_method`: Whether to fetch the user profile from the userinfo
+ endpoint, or to rely on the data returned in the id_token from the `token_endpoint`.
+ Valid values are: `auto` or `userinfo_endpoint`.
+ Defaults to `auto`, which uses the userinfo endpoint if `openid` is
+ not included in `scopes`. Set to `userinfo_endpoint` to always use the
+ userinfo endpoint.
+
+* `allow_existing_users`: set to true to allow a user logging in via OIDC to
+ match a pre-existing account instead of failing. This could be used if
+ switching from password logins to OIDC. Defaults to false.
+
+* `user_mapping_provider`: Configuration for how attributes returned from an OIDC
+  provider are mapped onto a Matrix user. This setting has the following
+ sub-properties:
+
+ * `module`: The class name of a custom mapping module. Default is
+ `synapse.handlers.oidc.JinjaOidcMappingProvider`.
+ See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+ for information on implementing a custom mapping provider.
+
+ * `config`: Configuration for the mapping provider module. This section will
+ be passed as a Python dictionary to the user mapping provider
+ module's `parse_config` method.
+
+ For the default provider, the following settings are available:
+
+    * `subject_claim`: name of the claim containing a unique identifier
+      for the user. Defaults to 'sub', which OpenID Connect
+      compliant providers should provide.
+
+ * `localpart_template`: Jinja2 template for the localpart of the MXID.
+ If this is not set, the user will be prompted to choose their
+ own username (see the documentation for the `sso_auth_account_details.html`
+ template). This template can use the `localpart_from_email` filter.
+
+ * `confirm_localpart`: Whether to prompt the user to validate (or
+ change) the generated localpart (see the documentation for the
+ 'sso_auth_account_details.html' template), instead of
+ registering the account right away.
+
+ * `display_name_template`: Jinja2 template for the display name to set
+ on first login. If unset, no displayname will be set.
+
+ * `email_template`: Jinja2 template for the email address of the user.
+ If unset, no email address will be added to the account.
+
+ * `extra_attributes`: a map of Jinja2 templates for extra attributes
+ to send back to the client during login. Note that these are non-standard and clients will ignore them
+ without modifications.
+
+ When rendering, the Jinja2 templates are given a 'user' variable,
+ which is set to the claims returned by the UserInfo Endpoint and/or
+ in the ID Token.
+
+
+It is possible to configure Synapse to only allow logins if certain attributes
+match particular values in the OIDC userinfo. The requirements can be listed under
+`attribute_requirements` as shown here:
+```yaml
+attribute_requirements:
+ - attribute: family_name
+ value: "Stephensson"
+ - attribute: groups
+ value: "admin"
+```
+All of the listed attributes must match for the login to be permitted. Additional attributes can be added to
+userinfo by expanding the `scopes` section of the OIDC config to retrieve
+additional information from the OIDC provider.
+
+If the OIDC claim is a list, then the attribute must match any value in the list.
+Otherwise, it must exactly match the value of the claim. Using the example
+above, the `family_name` claim MUST be "Stephensson", but the `groups`
+claim MUST contain "admin".
+
+Example configuration:
+```yaml
+oidc_providers:
+ # Generic example
+ #
+ - idp_id: my_idp
+ idp_name: "My OpenID provider"
+ idp_icon: "mxc://example.com/mediaid"
+ discover: false
+ issuer: "https://accounts.example.com/"
+ client_id: "provided-by-your-issuer"
+ client_secret: "provided-by-your-issuer"
+ client_auth_method: client_secret_post
+ scopes: ["openid", "profile"]
+ authorization_endpoint: "https://accounts.example.com/oauth2/auth"
+ token_endpoint: "https://accounts.example.com/oauth2/token"
+ userinfo_endpoint: "https://accounts.example.com/userinfo"
+ jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
+ skip_verification: true
+ user_mapping_provider:
+ config:
+ subject_claim: "id"
+ localpart_template: "{{ user.login }}"
+ display_name_template: "{{ user.name }}"
+ email_template: "{{ user.email }}"
+ attribute_requirements:
+ - attribute: userGroup
+ value: "synapseUsers"
+```
+---
+Config option: `cas_config`
+
+Enable Central Authentication Service (CAS) for registration and login.
+Has the following sub-options:
+* `enabled`: Set this to true to enable authorization against a CAS server.
+ Defaults to false.
+* `server_url`: The URL of the CAS authorization endpoint.
+* `displayname_attribute`: The attribute of the CAS response to use as the display name.
+ If no name is given here, no displayname will be set.
+* `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes
+ match particular values. All of the keys given below must exist
+ and the values must match the given value. Alternately if the given value
+ is `None` then any value is allowed (the attribute just must exist).
+ All of the listed attributes must match for the login to be permitted.
+
+Example configuration:
+```yaml
+cas_config:
+ enabled: true
+ server_url: "https://cas-server.com"
+ displayname_attribute: name
+ required_attributes:
+ userGroup: "staff"
+ department: None
+```
+---
+Config option: `sso`
+
+Additional settings to use with single-sign on systems such as OpenID Connect,
+SAML2 and CAS.
+
+Server admins can configure custom templates for pages related to SSO. See
+[here](../../templates.md) for more information.
+
+Options include:
+* `client_whitelist`: A list of client URLs which are whitelisted so that the user does not
+ have to confirm giving access to their account to the URL. Any client
+ whose URL starts with an entry in the following list will not be subject
+ to an additional confirmation step after the SSO login is completed.
+ WARNING: An entry such as "https://my.client" is insecure, because it
+ will also match "https://my.client.evil.site", exposing your users to
+ phishing attacks from evil.site. To avoid this, include a slash after the
+ hostname: "https://my.client/".
+ The login fallback page (used by clients that don't natively support the
+ required login flows) is whitelisted in addition to any URLs in this list.
+ By default, this list contains only the login fallback page.
+* `update_profile_information`: Use this setting to keep a user's profile fields in sync with information from
+ the identity provider. Currently only syncing the displayname is supported. Fields
+ are checked on every SSO login, and are updated if necessary.
+ Note that enabling this option will override user profile information,
+ regardless of whether users have opted-out of syncing that
+ information when first signing in. Defaults to false.
+
+
+Example configuration:
+```yaml
+sso:
+ client_whitelist:
+ - https://riot.im/develop
+ - https://my.custom.client/
+ update_profile_information: true
+```
+---
+Config option: `jwt_config`
+
+JSON web token integration. The following settings can be used to make
+Synapse use JSON web tokens for authentication, instead of its internal
+password database.
+
+Each JSON Web Token needs to contain a "sub" (subject) claim, which is
+used as the localpart of the mxid.
+
+Additionally, the expiration time ("exp"), not before time ("nbf"),
+and issued at ("iat") claims are validated if present.
+
+Note that this is a non-standard login type and client support is
+expected to be non-existent.
+
+See [here](../../jwt.md) for more.
+
+Additional sub-options for this setting include:
+* `enabled`: Set to true to enable authorization using JSON web
+ tokens. Defaults to false.
+* `secret`: This is either the private shared secret or the public key used to
+ decode the contents of the JSON web token. Required if `enabled` is set to true.
+* `algorithm`: The algorithm used to sign the JSON web token. Supported algorithms are listed at
+  https://pyjwt.readthedocs.io/en/latest/algorithms.html. Required if `enabled` is set to true.
+* `subject_claim`: Name of the claim containing a unique identifier for the user.
+ Optional, defaults to `sub`.
+* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the
+ "iss" claim will be required and validated for all JSON web tokens.
+* `audiences`: A list of audiences to validate the "aud" claim against. Optional.
+ If provided the "aud" claim will be required and validated for all JSON web tokens.
+ Note that if the "aud" claim is included in a JSON web token then
+ validation will fail without configuring audiences.
+
+Example configuration:
+```yaml
+jwt_config:
+ enabled: true
+ secret: "provided-by-your-issuer"
+ algorithm: "provided-by-your-issuer"
+ subject_claim: "name_of_claim"
+ issuer: "provided-by-your-issuer"
+ audiences:
+ - "provided-by-your-issuer"
+```
+---
+Config option: `password_config`
+
+Use this setting to enable password-based logins.
+
+This setting has the following sub-options:
+* `enabled`: Defaults to true.
+* `localdb_enabled`: Set to false to disable authentication against the local password
+ database. This is ignored if `enabled` is false, and is only useful
+ if you have other `password_providers`. Defaults to true.
+* `pepper`: Set the value here to a secret random string for extra security.
+   DO NOT CHANGE THIS AFTER INITIAL SETUP!
+* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc.
+ Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows:
+ * `enabled`: Defaults to false. Set to true to enable.
+ * `minimum_length`: Minimum accepted length for a password. Defaults to 0.
+ * `require_digit`: Whether a password must contain at least one digit.
+ Defaults to false.
+ * `require_symbol`: Whether a password must contain at least one symbol.
+ A symbol is any character that's not a number or a letter. Defaults to false.
+ * `require_lowercase`: Whether a password must contain at least one lowercase letter.
+ Defaults to false.
+ * `require_uppercase`: Whether a password must contain at least one uppercase letter.
+ Defaults to false.
+
+
+Example configuration:
+```yaml
+password_config:
+ enabled: false
+ localdb_enabled: false
+ pepper: "EVEN_MORE_SECRET"
+
+ policy:
+ enabled: true
+ minimum_length: 15
+ require_digit: true
+ require_symbol: true
+ require_lowercase: true
+ require_uppercase: true
+```
+---
+Config option: `ui_auth`
+
+The amount of time to allow a user-interactive authentication session to be active.
+
+This defaults to 0, meaning the user is queried for their credentials
+before every action, but this can be overridden to allow a single
+validation to be re-used. This weakens the protections afforded by
+the user-interactive authentication process, by allowing for multiple
+(and potentially different) operations to use the same validation session.
+
+This is ignored for potentially "dangerous" operations (including
+deactivating an account, modifying an account password, and
+adding a 3PID).
+
+Use the `session_timeout` sub-option here to change the time allowed for credential validation.
+
+Example configuration:
+```yaml
+ui_auth:
+ session_timeout: "15s"
+```
+---
+Config option: `email`
+
+Configuration for sending emails from Synapse.
+
+Server admins can configure custom templates for email content. See
+[here](../../templates.md) for more information.
+
+This setting has the following sub-options:
+* `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
+* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 25.
+* `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server. By default, no
+ authentication is attempted.
+* `require_transport_security`: Set to true to require TLS transport security for SMTP.
+ By default, Synapse will connect over plain text, and will then switch to
+ TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
+ Synapse will refuse to connect unless the server supports STARTTLS.
+* `enable_tls`: By default, if the server supports TLS, it will be used, and the server
+ must present a certificate that is valid for 'smtp_host'. If this option
+ is set to false, TLS will not be used.
+* `notif_from`: defines the "From" address to use when sending emails.
+ It must be set if email sending is enabled. The placeholder '%(app)s' will be replaced by the application name,
+ which is normally set in `app_name`, but may be overridden by the
+ Matrix client application. Note that the placeholder must be written '%(app)s', including the
+ trailing 's'.
+* `app_name`: defines the default value for '%(app)s' in `notif_from` and email
+   subjects. It defaults to 'Matrix'.
+* `enable_notifs`: Set to true to enable sending emails for messages that the user
+ has missed. Disabled by default.
+* `notif_for_new_users`: Set to false to disable automatic subscription to email
+ notifications for new users. Enabled by default.
+* `client_base_url`: Custom URL for client links within the email notifications. By default
+ links will be based on "https://matrix.to". (This setting used to be called `riot_base_url`;
+ the old name is still supported for backwards-compatibility but is now deprecated.)
+* `validation_token_lifetime`: Configures the time that a validation email will expire after sending.
+ Defaults to 1h.
+* `invite_client_location`: The web client location to direct users to during an invite. This is passed
+ to the identity server as the `org.matrix.web_client_location` key. Defaults
+ to unset, giving no guidance to the identity server.
+* `subjects`: Subjects to use when sending emails from Synapse. The placeholder '%(app)s' will
+ be replaced with the value of the `app_name` setting, or by a value dictated by the Matrix client application.
+ In addition, each subject can use the following placeholders: '%(person)s', which will be replaced by the displayname
+ of the user(s) that sent the message(s), e.g. "Alice and Bob", and '%(room)s', which will be replaced by the name of the room the
+  message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration
+  can use the '%(server_name)s' placeholder, which will be replaced by the value of the
+  `server_name` setting in your Synapse configuration.
+
+ Here is a list of subjects for notification emails that can be set:
+ * `message_from_person_in_room`: Subject to use to notify about one message from one or more user(s) in a
+ room which has a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."
+ * `message_from_person`: Subject to use to notify about one message from one or more user(s) in a
+ room which doesn't have a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s..."
+ * `messages_from_person`: Subject to use to notify about multiple messages from one or more users in
+ a room which doesn't have a name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s..."
+ * `messages_in_room`: Subject to use to notify about multiple messages in a room which has a
+ name. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room..."
+ * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms.
+ Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room and others..."
+ * `messages_from_person_and_others`: Subject to use to notify about multiple messages from multiple persons in
+ multiple rooms. This is similar to the setting above except it's used when
+ the room in which the notification was triggered has no name. Defaults to
+ "[%(app)s] You have messages on %(app)s from %(person)s and others..."
+ * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name.
+ Defaults to "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."
+ * `invite_from_person`: Subject to use to notify about an invite to a room which doesn't have a
+ name. Defaults to "[%(app)s] %(person)s has invited you to chat on %(app)s..."
+ * `password_reset`: Subject to use when sending a password reset email. Defaults to "[%(server_name)s] Password reset"
+ * `email_validation`: Subject to use when sending a verification email to assert an address's
+ ownership. Defaults to "[%(server_name)s] Validate your email"
+
+Example configuration:
+```yaml
+email:
+ smtp_host: mail.server
+ smtp_port: 587
+ smtp_user: "exampleusername"
+ smtp_pass: "examplepassword"
+ require_transport_security: true
+ enable_tls: false
+ notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
+ app_name: my_branded_matrix_server
+ enable_notifs: true
+ notif_for_new_users: false
+ client_base_url: "http://localhost/riot"
+ validation_token_lifetime: 15m
+ invite_client_location: https://app.element.io
+
+ subjects:
+ message_from_person_in_room: "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."
+ message_from_person: "[%(app)s] You have a message on %(app)s from %(person)s..."
+ messages_from_person: "[%(app)s] You have messages on %(app)s from %(person)s..."
+ messages_in_room: "[%(app)s] You have messages on %(app)s in the %(room)s room..."
+ messages_in_room_and_others: "[%(app)s] You have messages on %(app)s in the %(room)s room and others..."
+ messages_from_person_and_others: "[%(app)s] You have messages on %(app)s from %(person)s and others..."
+ invite_from_person_to_room: "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."
+ invite_from_person: "[%(app)s] %(person)s has invited you to chat on %(app)s..."
+ password_reset: "[%(server_name)s] Password reset"
+ email_validation: "[%(server_name)s] Validate your email"
+```
+---
+## Push ##
+Configuration settings related to push notifications.
+
+---
+Config option: `push`
+
+This setting defines options for push notifications.
+
+This option has a number of sub-options. They are as follows:
+* `include_content`: Clients requesting push notifications can either have the body of
+  the message sent in the notification poke along with other details
+  like the sender, or just the event ID and room ID (`event_id_only`).
+  If clients choose to have the body sent, this option controls whether the
+  notification request includes the content of the event (other details
+  like the sender are still included). If `event_id_only` is enabled, it
+  has no effect.
+  On modern Android devices the notification content will still appear,
+  because it is loaded by the app. iPhones, however, will show a
+  notification saying only that a message arrived and who it came from.
+  Defaults to true. Set to false to only include the event ID and room ID in push notification payloads.
+* `group_unread_count_by_room`: When a push notification is received, an unread count is also sent.
+ This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the
+ user has unread messages in. Defaults to true, meaning push clients will see the number of
+ rooms with unread messages in them. Set to false to instead send the number
+ of unread messages.
+
+Example configuration:
+```yaml
+push:
+ include_content: false
+ group_unread_count_by_room: false
+```
+---
+## Rooms ##
+Config options relating to rooms.
+
+---
+Config option: `encryption_enabled_by_default_for_room_type`
+
+Controls whether locally-created rooms should be end-to-end encrypted by
+default.
+
+Possible options are "all", "invite", and "off". They are defined as:
+
+* "all": any locally-created room
+* "invite": any room created with the `private_chat` or `trusted_private_chat`
+ room creation presets
+* "off": this option will take no effect
+
+The default value is "off".
+
+Note that this option will only affect rooms created after it is set. It
+will also not affect rooms created by other servers.
+
+Example configuration:
+```yaml
+encryption_enabled_by_default_for_room_type: invite
+```
+---
+Config option: `enable_group_creation`
+
+Set to true to allow non-server-admin users to create groups on this server.
+
+Example configuration:
+```yaml
+enable_group_creation: true
+```
+---
+Config option: `group_creation_prefix`
+
+If enabled/present, non-server admins can only create groups with local parts
+starting with this prefix.
+
+Example configuration:
+```yaml
+group_creation_prefix: "unofficial_"
+```
+---
+Config option: `user_directory`
+
+This setting defines options related to the user directory.
+
+This option has the following sub-options:
+* `enabled`: Defines whether users can search the user directory. If false then
+ empty responses are returned to all queries. Defaults to true.
+* `search_all_users`: Defines whether to search all users visible to your homeserver when searching
+  the user directory, even if they do not share a room with the requester.
+  If false, search results will only contain users
+  visible in public rooms and users sharing a room with the requester.
+  Defaults to false.
+  NB. If you set this to true, and the last time the user_directory search
+  indexes were (re)built was before Synapse 1.44, you'll have to
+  rebuild the indexes in order to search through all known users.
+  These indexes are built the first time Synapse starts; admins can
+  manually trigger a rebuild via the API following the instructions at
+  https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
+* `prefer_local_users`: Defines whether to prefer local users in search query results.
+ If set to true, local users are more likely to appear above remote users when searching the
+ user directory. Defaults to false.
+
+Example configuration:
+```yaml
+user_directory:
+ enabled: false
+ search_all_users: true
+ prefer_local_users: true
+```
+---
+Config option: `user_consent`
+
+For detailed instructions on user consent configuration, see [here](../../consent_tracking.md).
+
+Parts of this section are required if enabling the `consent` resource under
+`listeners`, in particular `template_dir` and `version`.
+
+* `template_dir`: gives the location of the templates for the HTML forms.
+  This directory should contain one subdirectory per language (eg, `en`, `fr`),
+  and each language directory should contain the policy document (named as
+  `<version>.html`) and a success page (`success.html`).
+
+* `version`: specifies the 'current' version of the policy document. It defines
+ the version to be served by the consent resource if there is no 'v'
+ parameter.
+
+* `server_notice_content`: if enabled, will send a user a "Server Notice"
+  asking them to consent to the privacy policy. The `server_notices` section
+  must also be configured for this to work. Notices will *not* be sent to
+  guest users unless `send_server_notice_to_guests` is set to true.
+
+* `block_events_error`, if set, will block any attempts to send events
+ until the user consents to the privacy policy. The value of the setting is
+ used as the text of the error.
+
+* `require_at_registration`, if enabled, will add a step to the registration
+ process, similar to how captcha works. Users will be required to accept the
+ policy before their account is created.
+
+* `policy_name` is the display name of the policy users will see when registering
+ for an account. Has no effect unless `require_at_registration` is enabled.
+ Defaults to "Privacy Policy".
+
+Example configuration:
+```yaml
+user_consent:
+ template_dir: res/templates/privacy
+ version: 1.0
+ server_notice_content:
+ msgtype: m.text
+ body: >-
+ To continue using this homeserver you must review and agree to the
+ terms and conditions at %(consent_uri)s
+ send_server_notice_to_guests: true
+ block_events_error: >-
+ To continue using this homeserver you must review and agree to the
+ terms and conditions at %(consent_uri)s
+ require_at_registration: false
+ policy_name: Privacy Policy
+```
+---
+Config option: `stats`
+
+Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md)
+for more.
+
+* `enabled`: Set to false to disable room and user statistics. Note that doing
+ so may cause certain features (such as the room directory) not to work
+ correctly. Defaults to true.
+
+Example configuration:
+```yaml
+stats:
+ enabled: false
+```
+---
+Config option: `server_notices`
+
+Use this setting to enable a room which can be used to send notices
+from the server to users. It is a special room which users cannot leave; notices
+in the room come from a special "notices" user id.
+
+If you use this setting, you *must* define the `system_mxid_localpart`
+sub-setting, which defines the id of the user which will be used to send the
+notices.
+
+Sub-options for this setting include:
+* `system_mxid_display_name`: set the display name of the "notices" user
+* `system_mxid_avatar_url`: set the avatar for the "notices" user
+* `room_name`: set the room name of the server notices room
+
+Example configuration:
+```yaml
+server_notices:
+ system_mxid_localpart: notices
+ system_mxid_display_name: "Server Notices"
+ system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
+ room_name: "Server Notices"
+```
+---
+Config option: `enable_room_list_search`
+
+Set to false to disable searching the public room list. When disabled,
+this blocks searching of the local and remote room lists for local and remote
+users, always returning an empty list for all queries. Defaults to true.
+
+Example configuration:
+```yaml
+enable_room_list_search: false
+```
+---
+Config option: `alias_creation_rules`
+
+The `alias_creation_rules` option controls who is allowed to create aliases
+on this server.
+
+The format of this option is a list of rules that contain globs that
+match against `user_id`, `room_id` and the new alias (fully qualified with
+the server name). The action in the first rule that matches is taken,
+which can currently either be "allow" or "deny".
+
+Missing `user_id`, `room_id` and `alias` fields default to "*".
+
+If no rules match the request is denied. An empty list means no one
+can create aliases.
+
+Options for the rules include:
+* `user_id`: Matches against the creator of the alias. Defaults to "*".
+* `alias`: Matches against the alias being created. Defaults to "*".
+* `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*"
+* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.
+
+Example configuration:
+```yaml
+alias_creation_rules:
+ - user_id: "bad_user"
+ alias: "spammy_alias"
+ room_id: "*"
+ action: deny
+```
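+
+Because a request is denied when no rule matches, a configuration that blocks one
+user but permits everyone else needs an explicit catch-all rule at the end (missing
+fields default to "*"), for example:
+```yaml
+alias_creation_rules:
+  - user_id: "@bad_user:example.com"
+    action: deny
+  - action: allow
+```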
+---
+Config option: `room_list_publication_rules`
+
+The `room_list_publication_rules` option controls who can publish and
+which rooms can be published in the public room list.
+
+The format of this option is the same as that for
+`alias_creation_rules`.
+
+If the room has one or more aliases associated with it, only one of
+the aliases needs to match the alias rule. If there are no aliases
+then only rules with `alias: *` match.
+
+If no rules match, the request is denied. An empty list means no one
+can publish rooms.
+
+Options for the rules include:
+* `user_id`: Matches against the creator of the alias. Defaults to "*".
+* `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*".
+* `room_id`: Matches against the room ID being published. Defaults to "*".
+* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow.
+
+Example configuration:
+```yaml
+room_list_publication_rules:
+ - user_id: "*"
+ alias: "*"
+ room_id: "*"
+ action: allow
+```
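+
+As with alias creation, unmatched requests are denied, so a selective policy
+needs an explicit fallback rule. A sketch (the alias glob is illustrative)
+that blocks publication of rooms with a spammy alias while allowing the rest:
+```yaml
+room_list_publication_rules:
+  - alias: "#spam*:*"
+    action: deny
+  - user_id: "*"
+    action: allow
+```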
+---
+## Opentracing ##
+Configuration options related to Opentracing support.
+
+---
+Config option: `opentracing`
+
+These settings enable and configure opentracing, which implements distributed tracing.
+This allows you to observe the causal chains of events across servers
+including requests, key lookups etc., across any server running
+synapse or any other services which support opentracing
+(specifically those implemented with Jaeger).
+
+Sub-options include:
+* `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default.
+* `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage.
+ See [here](../../opentracing.md) for more.
+ This is a list of regexes which are matched against the `server_name` of the homeserver.
+ By default, it is empty, so no servers are matched.
+* `force_tracing_for_users`: A list of the matrix IDs of users whose requests will always be traced,
+  even if the tracing system would otherwise drop the traces due to probabilistic sampling.
+  By default, the list is empty.
+* `jaeger_config`: Jaeger can be configured to sample traces at different rates.
+ All configuration options provided by Jaeger can be set here. Jaeger's configuration is
+ mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/).
+
+Example configuration:
+```yaml
+opentracing:
+ enabled: true
+ homeserver_whitelist:
+ - ".*"
+ force_tracing_for_users:
+ - "@user1:server_name"
+ - "@user2:server_name"
+
+ jaeger_config:
+ sampler:
+ type: const
+ param: 1
+    logging: false
+```
+---
+## Workers ##
+Configuration options related to workers.
+
+---
+Config option: `send_federation`
+
+Controls sending of outbound federation transactions on the main process.
+Set to false if using a federation sender worker. Defaults to true.
+
+Example configuration:
+```yaml
+send_federation: false
+```
+---
+Config option: `federation_sender_instances`
+
+It is possible to run multiple federation sender workers, in which case the
+work is balanced across them. Use this setting to list the senders.
+
+This configuration setting must be shared between all federation sender workers. If it
+is changed, all federation sender workers must be stopped and then started together, to
+ensure that all instances are running with the same config (otherwise events may be
+dropped).
+
+Example configuration:
+```yaml
+federation_sender_instances:
+ - federation_sender1
+```
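+
+A sketch of a shared configuration running two federation senders (the worker
+names are illustrative); note that `send_federation: false` keeps the main
+process from sending transactions itself:
+```yaml
+send_federation: false
+federation_sender_instances:
+  - federation_sender1
+  - federation_sender2
+```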
+---
+Config option: `instance_map`
+
+When using workers, this should be a map from worker name to the
+HTTP replication listener of the worker, if configured.
+
+Example configuration:
+```yaml
+instance_map:
+ worker1:
+ host: localhost
+ port: 8034
+```
+---
+Config option: `stream_writers`
+
+When using workers you can define which workers should handle event
+persistence and typing notifications. Any worker specified here must
+also be in the `instance_map`.
+
+Example configuration:
+```yaml
+stream_writers:
+ events: worker1
+ typing: worker1
+```
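+
+Since any stream writer must also appear in the `instance_map`, a combined
+sketch might look like this (the host and port are illustrative):
+```yaml
+instance_map:
+  worker1:
+    host: localhost
+    port: 8034
+stream_writers:
+  events: worker1
+  typing: worker1
+```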
+---
+Config option: `run_background_tasks_on`
+
+The worker that is used to run background tasks (e.g. cleaning up expired
+data). If not provided, this defaults to the main process.
+
+Example configuration:
+```yaml
+run_background_tasks_on: worker1
+```
+---
+Config option: `worker_replication_secret`
+
+A shared secret used by the replication APIs to authenticate HTTP requests
+from workers.
+
+By default this is unused and traffic is not authenticated.
+
+Example configuration:
+```yaml
+worker_replication_secret: "secret_secret"
+```
+---
+Config option: `redis`
+
+Configuration for Redis when using workers. This *must* be enabled when
+using workers (unless using old style direct TCP configuration).
+This setting has the following sub-options:
+* `enabled`: whether to use Redis support. Defaults to false.
+* `host` and `port`: Optional host and port to use to connect to Redis. These default
+  to localhost and 6379 respectively.
+* `password`: Optional password if configured on the Redis instance.
+
+Example configuration:
+```yaml
+redis:
+ enabled: true
+ host: localhost
+ port: 6379
+ password: <secret_password>
+```
+---
+## Background Updates ##
+Configuration settings related to background updates.
+
+---
+Config option: `background_updates`
+
+Background updates are database updates that are run in the background in batches.
+The duration, minimum batch size, default batch size, whether to sleep between batches,
+and how long to sleep for can all be configured. This is helpful for speeding up or
+slowing down the updates.
+This setting has the following sub-options:
+* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100.
+ Set a different time to change the default.
+* `sleep_enabled`: Whether to sleep between updates. Defaults to true. Set to false to change the default.
+* `sleep_duration_ms`: If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000.
+ Set a duration to change the default.
+* `min_batch_size`: Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1.
+ Set a size to change the default.
+* `default_batch_size`: The batch size to use for the first iteration of a new background update. The default is 100.
+ Set a size to change the default.
+
+Example configuration:
+```yaml
+background_updates:
+ background_update_duration_ms: 500
+ sleep_enabled: false
+ sleep_duration_ms: 300
+ min_batch_size: 10
+ default_batch_size: 50
+```
\ No newline at end of file
diff --git a/docs/website_files/table-of-contents.js b/docs/website_files/table-of-contents.js
index 0de5960b22..772da97fb9 100644
--- a/docs/website_files/table-of-contents.js
+++ b/docs/website_files/table-of-contents.js
@@ -75,6 +75,20 @@ function setTocEntry() {
* Populate sidebar on load
*/
window.addEventListener('load', () => {
+ // Prevent rendering the table of contents of the "print book" page, as it
+ // will end up being rendered into the output (in a broken-looking way)
+
+    // Get the name of the current page (e.g. 'print.html')
+ const pageNameExtension = window.location.pathname.split('/').pop();
+
+ // Split off the extension (as '.../print' is also a valid page name), which
+ // should result in 'print'
+ const pageName = pageNameExtension.split('.')[0];
+ if (pageName === "print") {
+ // Don't render the table of contents on this page
+ return;
+ }
+
// Only create table of contents if there is more than one header on the page
if (headers.length <= 1) {
return;
diff --git a/docs/workers.md b/docs/workers.md
index caef44b614..858411b15e 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -146,12 +146,10 @@ worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- - type: http
- port: 8083
- resources:
- - names:
- - client
- - federation
+ - type: http
+ port: 8083
+ resources:
+ - names: [client, federation]
worker_log_config: /home/matrix/synapse/config/worker1_log_config.yaml
```
@@ -343,9 +341,9 @@ effects of bursts of events from that bridge on events sent by normal users.
#### Stream writers
-Additionally, there is *experimental* support for moving writing of specific
-streams (such as events) off of the main process to a particular worker. (This
-is only supported with Redis-based replication.)
+Additionally, the writing of specific streams (such as events) can be moved off
+of the main process to a particular worker.
+(This is only supported with Redis-based replication.)
To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
@@ -422,7 +420,7 @@ the stream writer for the `presence` stream:
#### Background tasks
-There is also *experimental* support for moving background tasks to a separate
+There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled).
diff --git a/mypy.ini b/mypy.ini
index 5246f987c0..b2c3b1524c 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -13,7 +13,6 @@ no_implicit_optional = True
files =
docker/,
scripts-dev/,
- setup.py,
synapse/,
tests/
diff --git a/poetry.lock b/poetry.lock
index c11e2c6848..95c1afc077 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -720,7 +720,7 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
[[package]]
name = "prometheus-client"
-version = "0.13.1"
+version = "0.14.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false
@@ -1288,7 +1288,7 @@ urllib3 = ">=1.26.0"
[[package]]
name = "twisted"
-version = "22.2.0"
+version = "22.4.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false
@@ -1308,19 +1308,20 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
-all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
+conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
contextvars = ["contextvars (>=2.4,<3)"]
dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
-http2 = ["h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
-mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
-osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
+macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
+osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
-windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<4.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
+windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
[[package]]
name = "twisted-iocpsupport"
@@ -2229,8 +2230,8 @@ platformdirs = [
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
]
prometheus-client = [
- {file = "prometheus_client-0.13.1-py3-none-any.whl", hash = "sha256:357a447fd2359b0a1d2e9b311a0c5778c330cfbe186d880ad5a6b39884652316"},
- {file = "prometheus_client-0.13.1.tar.gz", hash = "sha256:ada41b891b79fca5638bd5cfe149efa86512eaa55987893becd2c6d8d0a5dfc5"},
+ {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
+ {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
]
psycopg2 = [
{file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},
@@ -2596,8 +2597,8 @@ twine = [
{file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
]
twisted = [
- {file = "Twisted-22.2.0-py3-none-any.whl", hash = "sha256:5c63c149eb6b8fe1e32a0215b1cef96fabdba04f705d8efb9174b1ccf5b49d49"},
- {file = "Twisted-22.2.0.tar.gz", hash = "sha256:57f32b1f6838facb8c004c89467840367ad38e9e535f8252091345dba500b4f2"},
+ {file = "Twisted-22.4.0-py3-none-any.whl", hash = "sha256:f9f7a91f94932477a9fc3b169d57f54f96c6e74a23d78d9ce54039a7f48928a2"},
+ {file = "Twisted-22.4.0.tar.gz", hash = "sha256:a047990f57dfae1e0bd2b7df2526d4f16dcdc843774dc108b78c52f2a5f13680"},
]
twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
diff --git a/pyproject.toml b/pyproject.toml
index 65f5a5f59f..c7f3e20fed 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -280,5 +280,5 @@ twine = "*"
towncrier = ">=18.6.0rc1"
[build-system]
-requires = ["setuptools"]
-build-backend = "setuptools.build_meta"
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index c72e19f61d..079d2f5ed0 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -124,7 +124,12 @@ def request(
authorization_headers = []
for key, sig in signed_json["signatures"][origin_name].items():
- header = 'X-Matrix origin=%s,key="%s",sig="%s"' % (origin_name, key, sig)
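+        # also include the destination in the signed header, so that the
+        # receiving server can check that the request was really meant for
+        # it (MSC3383)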
+ header = 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"' % (
+ origin_name,
+ key,
+ sig,
+ destination,
+ )
authorization_headers.append(header.encode("ascii"))
print("Authorization: %s" % header, file=sys.stderr)
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 4698d2d5be..91a704d982 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -79,8 +79,20 @@ else
# If we were not asked to lint changed files, and no paths were found as a result,
# then lint everything!
if [[ -z ${files+x} ]]; then
- # Lint all source code files and directories
- files=( "." )
+ # CI runs each linter on the entire checkout, e.g. `black .`. So don't
+        # rely on this list to *find* lint targets if that misses a file; instead,
+ # use it to exclude files from linters when this can't be done by config.
+ #
+ # To check which files the linters examine, use:
+ # black --verbose . 2>&1 | \grep -v ignored
+ # isort --show-files .
+ # flake8 --verbose . # This isn't a great option
+ # mypy has explicit config in mypy.ini; there is also mypy --verbose
+ files=(
+ "synapse" "docker" "tests"
+ "scripts-dev"
+            "contrib" "synmark" "stubs" ".ci"
+ )
fi
fi
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 685fa32b03..6f7cf6888d 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -69,11 +69,12 @@ def cli():
# ... wait for assets to build ...
./scripts-dev/release.py publish
+
./scripts-dev/release.py upload
# Optional: generate some nice links for the announcement
- ./scripts-dev/release.py upload
+ ./scripts-dev/release.py announce
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published.
@@ -229,7 +230,7 @@ def prepare():
debian_version = new_version
run_until_successful(
- f'dch -M -v {debian_version} "New synapse release {debian_version}."',
+ f'dch -M -v {debian_version} "New Synapse release {new_version}."',
shell=True,
)
run_until_successful('dch -M -r -D stable ""', shell=True)
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 6213f3265b..0000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,9 +0,0 @@
-[check-manifest]
-ignore =
- .git-blame-ignore-revs
- contrib
- contrib/*
- docs/*
- pylint.cfg
- tox.ini
-
diff --git a/setup.py b/setup.py
deleted file mode 100755
index ecd30247ed..0000000000
--- a/setup.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014-2017 OpenMarket Ltd
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2017-2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-from typing import Any, Dict
-
-from setuptools import Command, find_packages, setup
-
-here = os.path.abspath(os.path.dirname(__file__))
-
-
-# Some notes on `setup.py test`:
-#
-# Once upon a time we used to try to make `setup.py test` run `tox` to run the
-# tests. That's a bad idea for three reasons:
-#
-# 1: `setup.py test` is supposed to find out whether the tests work in the
-# *current* environmentt, not whatever tox sets up.
-# 2: Empirically, trying to install tox during the test run wasn't working ("No
-# module named virtualenv").
-# 3: The tox documentation advises against it[1].
-#
-# Even further back in time, we used to use setuptools_trial [2]. That has its
-# own set of issues: for instance, it requires installation of Twisted to build
-# an sdist (because the recommended mode of usage is to add it to
-# `setup_requires`). That in turn means that in order to successfully run tox
-# you have to have the python header files installed for whichever version of
-# python tox uses (which is python3 on recent ubuntus, for example).
-#
-# So, for now at least, we stick with what appears to be the convention among
-# Twisted projects, and don't attempt to do anything when someone runs
-# `setup.py test`; instead we direct people to run `trial` directly if they
-# care.
-#
-# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
-# [2]: https://pypi.python.org/pypi/setuptools_trial
-class TestCommand(Command):
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- print(
- """Synapse's tests cannot be run via setup.py. To run them, try:
- PYTHONPATH="." trial tests
-"""
- )
-
-
-def read_file(path_segments):
- """Read a file from the package. Takes a list of strings to join to
- make the path"""
- file_path = os.path.join(here, *path_segments)
- with open(file_path) as f:
- return f.read()
-
-
-def exec_file(path_segments):
- """Execute a single python file to get the variables defined in it"""
- result: Dict[str, Any] = {}
- code = read_file(path_segments)
- exec(code, result)
- return result
-
-
-version = exec_file(("synapse", "__init__.py"))["__version__"]
-dependencies = exec_file(("synapse", "python_dependencies.py"))
-long_description = read_file(("README.rst",))
-
-REQUIREMENTS = dependencies["REQUIREMENTS"]
-CONDITIONAL_REQUIREMENTS = dependencies["CONDITIONAL_REQUIREMENTS"]
-ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"]
-
-# Make `pip install matrix-synapse[all]` install all the optional dependencies.
-CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
-
-# Developer dependencies should not get included in "all".
-#
-# We pin black so that our tests don't start failing on new releases.
-CONDITIONAL_REQUIREMENTS["lint"] = [
- "isort==5.7.0",
- "black==22.3.0",
- "flake8-comprehensions",
- "flake8-bugbear==21.3.2",
- "flake8",
-]
-
-CONDITIONAL_REQUIREMENTS["mypy"] = [
- "mypy==0.931",
- "mypy-zope==0.3.5",
- "types-bleach>=4.1.0",
- "types-jsonschema>=3.2.0",
- "types-opentracing>=2.4.2",
- "types-Pillow>=8.3.4",
- "types-psycopg2>=2.9.9",
- "types-pyOpenSSL>=20.0.7",
- "types-PyYAML>=5.4.10",
- "types-requests>=2.26.0",
- "types-setuptools>=57.4.0",
-]
-
-# Dependencies which are exclusively required by unit test code. This is
-# NOT a list of all modules that are necessary to run the unit tests.
-# Tests assume that all optional dependencies are installed.
-#
-# parameterized_class decorator was introduced in parameterized 0.7.0
-CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "idna>=2.5"]
-
-CONDITIONAL_REQUIREMENTS["dev"] = (
- CONDITIONAL_REQUIREMENTS["lint"]
- + CONDITIONAL_REQUIREMENTS["mypy"]
- + CONDITIONAL_REQUIREMENTS["test"]
- + [
- # The following are used by the release script
- "click==8.1.0",
- "redbaron==0.9.2",
- "GitPython==3.1.14",
- "commonmark==0.9.1",
- "pygithub==1.55",
- # The following are executed as commands by the release script.
- "twine",
- "towncrier",
- ]
-)
-
-setup(
- name="matrix-synapse",
- version=version,
- packages=find_packages(exclude=["tests", "tests.*"]),
- description="Reference homeserver for the Matrix decentralised comms protocol",
- install_requires=REQUIREMENTS,
- extras_require=CONDITIONAL_REQUIREMENTS,
- include_package_data=True,
- zip_safe=False,
- long_description=long_description,
- long_description_content_type="text/x-rst",
- python_requires="~=3.7",
- entry_points={
- "console_scripts": [
- # Application
- "synapse_homeserver = synapse.app.homeserver:main",
- "synapse_worker = synapse.app.generic_worker:main",
- "synctl = synapse._scripts.synctl:main",
- # Scripts
- "export_signing_key = synapse._scripts.export_signing_key:main",
- "generate_config = synapse._scripts.generate_config:main",
- "generate_log_config = synapse._scripts.generate_log_config:main",
- "generate_signing_key = synapse._scripts.generate_signing_key:main",
- "hash_password = synapse._scripts.hash_password:main",
- "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main",
- "synapse_port_db = synapse._scripts.synapse_port_db:main",
- "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main",
- "update_synapse_database = synapse._scripts.update_synapse_database:main",
- ]
- },
- classifiers=[
- "Development Status :: 5 - Production/Stable",
- "Topic :: Communications :: Chat",
- "License :: OSI Approved :: Apache Software License",
- "Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- ],
- cmdclass={"test": TestCommand},
-)
diff --git a/synapse/__init__.py b/synapse/__init__.py
index b62eed66e2..1613941759 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -20,6 +20,8 @@ import json
import os
import sys
+from matrix_common.versionstring import get_distribution_version_string
+
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
print("Synapse requires Python 3.7 or above.")
@@ -68,7 +70,7 @@ try:
except ImportError:
pass
-__version__ = "1.57.1"
+__version__ = get_distribution_version_string("matrix-synapse")
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 92907415e6..0172eb60b8 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -179,8 +179,6 @@ class RelationTypes:
REPLACE: Final = "m.replace"
REFERENCE: Final = "m.reference"
THREAD: Final = "m.thread"
- # TODO Remove this in Synapse >= v1.57.0.
- UNSTABLE_THREAD: Final = "io.element.thread"
class LimitBlockingTypes:
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e92db29f6d..cb3b7323d5 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -79,6 +79,8 @@ class Codes:
UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN"
UNABLE_TO_GRANT_JOIN = "M_UNABLE_TO_GRANT_JOIN"
+ UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"
+
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
@@ -483,6 +485,22 @@ class RequestSendFailed(RuntimeError):
self.can_retry = can_retry
+class UnredactedContentDeletedError(SynapseError):
+ def __init__(self, content_keep_ms: Optional[int] = None):
+ super().__init__(
+ 404,
+ "The content for that event has already been erased from the database",
+ errcode=Codes.UNREDACTED_CONTENT_DELETED,
+ )
+ self.content_keep_ms = content_keep_ms
+
+ def error_dict(self) -> "JsonDict":
+ extra = {}
+ if self.content_keep_ms is not None:
+ extra = {"fi.mau.msc2815.content_keep_ms": self.content_keep_ms}
+ return cs_error(self.msg, self.errcode, **extra)
+
+
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server
interactions.
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 27e97d6f37..4a808e33fe 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -89,9 +89,7 @@ ROOM_EVENT_FILTER_SCHEMA = {
"org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
# MSC3440, filtering by event relations.
"related_by_senders": {"type": "array", "items": {"type": "string"}},
- "io.element.relation_senders": {"type": "array", "items": {"type": "string"}},
"related_by_rel_types": {"type": "array", "items": {"type": "string"}},
- "io.element.relation_types": {"type": "array", "items": {"type": "string"}},
},
}
@@ -323,16 +321,6 @@ class Filter:
self.related_by_senders = self.filter_json.get("related_by_senders", None)
self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
- # Fallback to the unstable prefix if the stable version is not given.
- if hs.config.experimental.msc3440_enabled:
- self.related_by_senders = self.related_by_senders or self.filter_json.get(
- "io.element.relation_senders", None
- )
- self.related_by_rel_types = (
- self.related_by_rel_types
- or self.filter_json.get("io.element.relation_types", None)
- )
-
def filters_all_types(self) -> bool:
return "*" in self.not_types
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 447476fbfa..421ed7481b 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -26,9 +26,6 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
experimental = config.get("experimental_features") or {}
- # MSC3440 (thread relation)
- self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False)
-
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
@@ -77,7 +74,10 @@ class ExperimentalConfig(Config):
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
# The deprecated groups feature.
- self.groups_enabled: bool = experimental.get("groups_enabled", True)
+ self.groups_enabled: bool = experimental.get("groups_enabled", False)
# MSC2654: Unread counts
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
+
+ # MSC2815 (allow room moderators to view redacted event content)
+ self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 415279d269..d771045b52 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -680,14 +680,6 @@ class ServerConfig(Config):
config.get("use_account_validity_in_account_status") or False
)
- # This is a temporary option that enables fully using the new
- # `device_lists_changes_in_room` without the backwards compat code. This
- # is primarily for testing. If enabled the server should *not* be
- # downgraded, as it may lead to missing device list updates.
- self.use_new_device_lists_changes_in_room = (
- config.get("use_new_device_lists_changes_in_room") or False
- )
-
self.rooms_to_exclude_from_sync: List[str] = (
config.get("exclude_rooms_from_sync") or []
)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 918e87ed9c..2174b4a094 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -39,7 +39,6 @@ from . import EventBase
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
- from synapse.server import HomeServer
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
@@ -396,9 +395,6 @@ class EventClientSerializer:
clients.
"""
- def __init__(self, hs: "HomeServer"):
- self._msc3440_enabled = hs.config.experimental.msc3440_enabled
-
def serialize_event(
self,
event: Union[JsonDict, EventBase],
@@ -406,6 +402,7 @@ class EventClientSerializer:
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
+ apply_edits: bool = True,
) -> JsonDict:
"""Serializes a single event.
@@ -413,10 +410,10 @@ class EventClientSerializer:
event: The event being serialized.
time_now: The current time in milliseconds
config: Event serialization config
- bundle_aggregations: Whether to include the bundled aggregations for this
- event. Only applies to non-state events. (State events never include
- bundled aggregations.)
-
+ bundle_aggregations: A map from event_id to the aggregations to be bundled
+ into the event.
+ apply_edits: Whether the content of the event should be modified to reflect
+ any replacement in `bundle_aggregations[<event_id>].replace`.
Returns:
The serialized event
"""
@@ -434,8 +431,9 @@ class EventClientSerializer:
event,
time_now,
config,
- bundle_aggregations[event.event_id],
+ event_aggregations,
serialized_event,
+ apply_edits=apply_edits,
)
return serialized_event
@@ -474,6 +472,7 @@ class EventClientSerializer:
config: SerializeEventConfig,
aggregations: "BundledAggregations",
serialized_event: JsonDict,
+ apply_edits: bool,
) -> None:
"""Potentially injects bundled aggregations into the unsigned portion of the serialized event.
@@ -483,7 +482,8 @@ class EventClientSerializer:
aggregations: The bundled aggregation to serialize.
serialized_event: The serialized event which may be modified.
config: Event serialization config
-
+ apply_edits: Whether the content of the event should be modified to reflect
+ any replacement in `aggregations.replace`.
"""
serialized_aggregations = {}
@@ -494,9 +494,10 @@ class EventClientSerializer:
serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references
if aggregations.replace:
- # If there is an edit, apply it to the event.
+ # If there is an edit, optionally apply it to the event.
edit = aggregations.replace
- self._apply_edit(event, serialized_event, edit)
+ if apply_edits:
+ self._apply_edit(event, serialized_event, edit)
# Include information about it in the relations dict.
serialized_aggregations[RelationTypes.REPLACE] = {
@@ -525,8 +526,6 @@ class EventClientSerializer:
"current_user_participated": thread.current_user_participated,
}
serialized_aggregations[RelationTypes.THREAD] = thread_summary
- if self._msc3440_enabled:
- serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary
# Include the bundled aggregations in the event.
if serialized_aggregations:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 69d833585f..beab1227b8 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -515,7 +515,7 @@ class FederationServer(FederationBase):
)
async def on_room_state_request(
- self, origin: str, room_id: str, event_id: Optional[str]
+ self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
@@ -530,18 +530,13 @@ class FederationServer(FederationBase):
# - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer.
async with self._server_linearizer.queue((origin, room_id)):
- resp: JsonDict = dict(
- await self._state_resp_cache.wrap(
- (room_id, event_id),
- self._on_context_state_request_compute,
- room_id,
- event_id,
- )
+ resp = await self._state_resp_cache.wrap(
+ (room_id, event_id),
+ self._on_context_state_request_compute,
+ room_id,
+ event_id,
)
- room_version = await self.store.get_room_version_id(room_id)
- resp["room_version"] = room_version
-
return 200, resp
async def on_state_ids_request(
@@ -574,14 +569,11 @@ class FederationServer(FederationBase):
return {"pdu_ids": state_ids, "auth_chain_ids": list(auth_chain_ids)}
async def _on_context_state_request_compute(
- self, room_id: str, event_id: Optional[str]
+ self, room_id: str, event_id: str
) -> Dict[str, list]:
pdus: Collection[EventBase]
- if event_id:
- event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
- pdus = await self.store.get_events_as_list(event_ids)
- else:
- pdus = (await self.state.get_current_state(room_id)).values()
+ event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
+ pdus = await self.store.get_events_as_list(event_ids)
auth_chain = await self.store.get_auth_chain(
room_id, [pdu.event_id for pdu in pdus]
@@ -687,8 +679,6 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
event_json = event.get_pdu_json(time_now)
resp = {
- # TODO Remove the unstable prefix when servers have updated.
- "org.matrix.msc3083.v2.event": event_json,
"event": event_json,
"state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 01dc5ca94f..1421050b9a 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -1380,16 +1380,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
prefix + "auth_chain.item",
use_float=True,
),
- # TODO Remove the unstable prefix when servers have updated.
- #
- # By re-using the same event dictionary this will cause the parsing of
- # org.matrix.msc3083.v2.event and event to stomp over each other.
- # Generally this should be fine.
- ijson.kvitems_coro(
- _event_parser(self._response.event_dict),
- prefix + "org.matrix.msc3083.v2.event",
- use_float=True,
- ),
ijson.kvitems_coro(
_event_parser(self._response.event_dict),
prefix + "event",
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 2529dee613..d629a3ecb5 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -16,7 +16,8 @@ import functools
import logging
import re
import time
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, cast
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tuple, cast
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
@@ -86,15 +87,24 @@ class Authenticator:
if not auth_headers:
raise NoAuthenticationError(
- 401, "Missing Authorization headers", Codes.UNAUTHORIZED
+ HTTPStatus.UNAUTHORIZED,
+ "Missing Authorization headers",
+ Codes.UNAUTHORIZED,
)
for auth in auth_headers:
if auth.startswith(b"X-Matrix"):
- (origin, key, sig) = _parse_auth_header(auth)
+ (origin, key, sig, destination) = _parse_auth_header(auth)
json_request["origin"] = origin
json_request["signatures"].setdefault(origin, {})[key] = sig
+            # if the origin server sent a destination, it needs to match our own server_name
+ if destination is not None and destination != self.server_name:
+ raise AuthenticationError(
+ HTTPStatus.UNAUTHORIZED,
+ "Destination mismatch in auth header",
+ Codes.UNAUTHORIZED,
+ )
if (
self.federation_domain_whitelist is not None
and origin not in self.federation_domain_whitelist
@@ -103,7 +113,9 @@ class Authenticator:
if origin is None or not json_request["signatures"]:
raise NoAuthenticationError(
- 401, "Missing Authorization headers", Codes.UNAUTHORIZED
+ HTTPStatus.UNAUTHORIZED,
+ "Missing Authorization headers",
+ Codes.UNAUTHORIZED,
)
await self.keyring.verify_json_for_server(
@@ -142,13 +154,14 @@ class Authenticator:
logger.exception("Error resetting retry timings on %s", origin)
-def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
+def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str]]:
"""Parse an X-Matrix auth header
Args:
header_bytes: header value
Returns:
-        origin, key id, signature.
+        origin, key id, signature, destination.
Raises:
@@ -157,7 +170,9 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
try:
header_str = header_bytes.decode("utf-8")
params = header_str.split(" ")[1].split(",")
- param_dict = {k: v for k, v in (kv.split("=", maxsplit=1) for kv in params)}
+ param_dict: Dict[str, str] = {
+ k: v for k, v in [param.split("=", maxsplit=1) for param in params]
+ }
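+        # e.g. {'origin': 'origin.example.com', 'key': '"ed25519:1"',
+        #       'sig': '"ABC"', 'destination': '"example.com"'};
+        # the quotes are removed by strip_quotes below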
def strip_quotes(value: str) -> str:
if value.startswith('"'):
@@ -172,7 +187,15 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
key = strip_quotes(param_dict["key"])
sig = strip_quotes(param_dict["sig"])
- return origin, key, sig
+
+ # get the destination server_name from the auth header if it exists
+ destination = param_dict.get("destination")
+ if destination is not None:
+ destination = strip_quotes(destination)
+ else:
+ destination = None
+
+ return origin, key, sig, destination
except Exception as e:
logger.warning(
"Error parsing auth header '%s': %s",
@@ -180,7 +203,7 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str]:
e,
)
raise AuthenticationError(
- 400, "Malformed Authorization header", Codes.UNAUTHORIZED
+ HTTPStatus.BAD_REQUEST, "Malformed Authorization header", Codes.UNAUTHORIZED
)
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index aed3d5069c..6fbc7b5f15 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -160,7 +160,7 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
return await self.handler.on_room_state_request(
origin,
room_id,
- parse_string_from_args(query, "event_id", None, required=False),
+ parse_string_from_args(query, "event_id", None, required=True),
)
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index ffa28b2a30..3c0fc756d4 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -291,12 +291,6 @@ class DeviceHandler(DeviceWorkerHandler):
# On start up check if there are any updates pending.
hs.get_reactor().callWhenRunning(self._handle_new_device_update_async)
- # Used to decide if we calculate outbound pokes up front or not. By
- # default we do to allow safely downgrading Synapse.
- self.use_new_device_lists_changes_in_room = (
- hs.config.server.use_new_device_lists_changes_in_room
- )
-
def _check_device_name_length(self, name: Optional[str]) -> None:
"""
Checks whether a device name is longer than the maximum allowed length.
@@ -490,23 +484,9 @@ class DeviceHandler(DeviceWorkerHandler):
room_ids = await self.store.get_rooms_for_user(user_id)
- hosts: Optional[Set[str]] = None
- if not self.use_new_device_lists_changes_in_room:
- hosts = set()
-
- if self.hs.is_mine_id(user_id):
- for room_id in room_ids:
- joined_users = await self.store.get_users_in_room(room_id)
- hosts.update(get_domain_from_id(u) for u in joined_users)
-
- set_tag("target_hosts", hosts)
-
- hosts.discard(self.server_name)
-
position = await self.store.add_device_change_to_streams(
user_id,
device_ids,
- hosts=hosts,
room_ids=room_ids,
)
@@ -528,14 +508,6 @@ class DeviceHandler(DeviceWorkerHandler):
# We may need to do some processing asynchronously.
self._handle_new_device_update_async()
- if hosts:
- logger.info(
- "Sending device list update notif for %r to: %r", user_id, hosts
- )
- for host in hosts:
- self.federation_sender.send_device_messages(host, immediate=False)
- log_kv({"message": "sent device update to host", "host": host})
-
async def notify_user_signature_update(
self, from_user_id: str, user_ids: List[str]
) -> None:
@@ -677,9 +649,13 @@ class DeviceHandler(DeviceWorkerHandler):
return
for user_id, device_id, room_id, stream_id, opentracing_context in rows:
- joined_user_ids = await self.store.get_users_in_room(room_id)
- hosts = {get_domain_from_id(u) for u in joined_user_ids}
- hosts.discard(self.server_name)
+ hosts = set()
+
+ # Ignore any users that aren't ours
+ if self.hs.is_mine_id(user_id):
+ joined_user_ids = await self.store.get_users_in_room(room_id)
+ hosts = {get_domain_from_id(u) for u in joined_user_ids}
+ hosts.discard(self.server_name)
# Check if we've already sent this update to some hosts
if current_stream_id == stream_id:
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index d2ccb5c5d3..5b94b00bc3 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -16,11 +16,12 @@ import logging
import random
from typing import TYPE_CHECKING, Iterable, List, Optional
-from synapse.api.constants import EduTypes, EventTypes, Membership
+from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.presence import format_user_presence_state
+from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, UserID
from synapse.visibility import filter_events_for_client
@@ -67,7 +68,9 @@ class EventStreamHandler:
presence_handler = self.hs.get_presence_handler()
context = await presence_handler.user_syncing(
- auth_user_id, affect_presence=affect_presence
+ auth_user_id,
+ affect_presence=affect_presence,
+ presence_state=PresenceState.ONLINE,
)
with context:
if timeout:
@@ -139,7 +142,11 @@ class EventHandler:
self.storage = hs.get_storage()
async def get_event(
- self, user: UserID, room_id: Optional[str], event_id: str
+ self,
+ user: UserID,
+ room_id: Optional[str],
+ event_id: str,
+ show_redacted: bool = False,
) -> Optional[EventBase]:
"""Retrieve a single specified event.
@@ -148,6 +155,7 @@ class EventHandler:
room_id: The expected room id. We'll return None if the
event's room does not match.
event_id: The event ID to obtain.
+ show_redacted: Should the full content of redacted events be returned?
Returns:
An event, or None if there is no event matching this ID.
Raises:
@@ -155,7 +163,12 @@ class EventHandler:
AuthError if the user does not have the rights to inspect this
event.
"""
- event = await self.store.get_event(event_id, check_room_id=room_id)
+ redact_behaviour = (
+ EventRedactBehaviour.AS_IS if show_redacted else EventRedactBehaviour.REDACT
+ )
+ event = await self.store.get_event(
+ event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
+ )
if not event:
return None
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 78d149905f..1434e99056 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -466,6 +466,8 @@ class FederationHandler:
)
if ret.partial_state:
+ # TODO(faster_joins): roll this back if we don't manage to start the
+ # background resync (eg process_remote_join fails)
await self.store.store_partial_state_room(room_id, ret.servers_in_room)
max_stream_id = await self._federation_event_handler.process_remote_join(
@@ -478,6 +480,18 @@ class FederationHandler:
partial_state=ret.partial_state,
)
+ if ret.partial_state:
+ # Kick off the process of asynchronously fetching the state for this
+ # room.
+ #
+ # TODO(faster_joins): pick this up again on restart
+ run_as_background_process(
+ desc="sync_partial_state_room",
+ func=self._sync_partial_state_room,
+ destination=origin,
+ room_id=room_id,
+ )
+
# We wait here until this instance has seen the events come down
# replication (if we're using replication) as the below uses caches.
await self._replication.wait_for_stream_position(
@@ -1370,3 +1384,64 @@ class FederationHandler:
# We fell off the bottom, couldn't get the complexity from anyone. Oh
# well.
return None
+
+ async def _sync_partial_state_room(
+ self,
+ destination: str,
+ room_id: str,
+ ) -> None:
+ """Background process to resync the state of a partial-state room
+
+ Args:
+ destination: homeserver to pull the state from
+ room_id: room to be resynced
+ """
+
+ # TODO(faster_joins): do we need to lock to avoid races? What happens if other
+ # worker processes kick off a resync in parallel? Perhaps we should just elect
+ # a single worker to do the resync.
+ #
+ # TODO(faster_joins): what happens if we leave the room during a resync? if we
+ # really leave, that might mean we have difficulty getting the room state over
+ # federation.
+ #
+ # TODO(faster_joins): try other destinations if the one we have fails
+
+ logger.info("Syncing state for room %s via %s", room_id, destination)
+
+ # we work through the queue in order of increasing stream ordering.
+ while True:
+ batch = await self.store.get_partial_state_events_batch(room_id)
+ if not batch:
+ # all the events are updated, so we can update current state and
+ # clear the lazy-loading flag.
+ logger.info("Updating current state for %s", room_id)
+ assert (
+ self.storage.persistence is not None
+ ), "TODO(faster_joins): support for workers"
+ await self.storage.persistence.update_current_state(room_id)
+
+ logger.info("Clearing partial-state flag for %s", room_id)
+ success = await self.store.clear_partial_state_room(room_id)
+ if success:
+ logger.info("State resync complete for %s", room_id)
+
+ # TODO(faster_joins) update room stats and user directory?
+ return
+
+ # we raced against more events arriving with partial state. Go round
+ # the loop again. We've already logged a warning, so no need for more.
+ # TODO(faster_joins): there is still a race here, whereby incoming events which raced
+ # with us will fail to be persisted after the call to `clear_partial_state_room` due to
+ # having partial state.
+ continue
+
+ events = await self.store.get_events_as_list(
+ batch,
+ redact_behaviour=EventRedactBehaviour.AS_IS,
+ allow_rejected=True,
+ )
+ for event in events:
+ await self._federation_event_handler.update_state_for_partial_state_event(
+ destination, event
+ )
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 03c1197c99..32bf02818c 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -477,6 +477,45 @@ class FederationEventHandler:
return await self.persist_events_and_notify(room_id, [(event, context)])
+ async def update_state_for_partial_state_event(
+ self, destination: str, event: EventBase
+ ) -> None:
+ """Recalculate the state at an event as part of a de-partial-stating process
+
+ Args:
+ destination: server to request full state from
+ event: partial-state event to be de-partial-stated
+ """
+ logger.info("Updating state for %s", event.event_id)
+ with nested_logging_context(suffix=event.event_id):
+ # if we have all the event's prev_events, then we can work out the
+ # state based on their states. Otherwise, we request it from the destination
+ # server.
+ #
+ # This is the same operation as we do when we receive a regular event
+ # over federation.
+ state = await self._resolve_state_at_missing_prevs(destination, event)
+
+ # build a new state group for it if need be
+ context = await self._state_handler.compute_event_context(
+ event,
+ old_state=state,
+ )
+ if context.partial_state:
+ # this can happen if some or all of the event's prev_events still have
+ # partial state - ie, an event has an earlier stream_ordering than one
+ # or more of its prev_events, so we de-partial-state it before its
+ # prev_events.
+ #
+ # TODO(faster_joins): we probably need to be more intelligent, and
+ # exclude partial-state prev_events from consideration
+ logger.warning(
+ "%s still has partial state: can't de-partial-state it yet",
+ event.event_id,
+ )
+ return
+ await self._store.update_state_for_partial_state_event(event, context)
+
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
) -> None:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 7db6905c61..1b092e900e 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -175,17 +175,13 @@ class MessageHandler:
state_filter = state_filter or StateFilter.all()
if at_token:
- # FIXME this claims to get the state at a stream position, but
- # get_recent_events_for_room operates by topo ordering. This therefore
- # does not reliably give you the state at the given stream position.
- # (https://github.com/matrix-org/synapse/issues/3305)
- last_events, _ = await self.store.get_recent_events_for_room(
- room_id, end_token=at_token.room_key, limit=1
+ last_event = await self.store.get_last_event_in_room_before_stream_ordering(
+ room_id,
+ end_token=at_token.room_key,
)
- if not last_events:
+ if not last_event:
raise NotFoundError("Can't find event for token %s" % (at_token,))
- last_event = last_events[0]
# check whether the user is in the room at that time to determine
# whether they should be treated as peeking.
@@ -204,7 +200,7 @@ class MessageHandler:
visible_events = await filter_events_for_client(
self.storage,
user_id,
- last_events,
+ [last_event],
filter_send_to_client=False,
is_peeking=is_peeking,
)
@@ -1102,10 +1098,7 @@ class EventCreationHandler:
raise SynapseError(400, "Can't send same reaction twice")
# Don't attempt to start a thread if the parent event is a relation.
- elif (
- relation_type == RelationTypes.THREAD
- or relation_type == RelationTypes.UNSTABLE_THREAD
- ):
+ elif relation_type == RelationTypes.THREAD:
if await self.store.event_includes_relation(relates_to):
raise SynapseError(
400, "Cannot start threads from an event with a relation"
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 209a4b0e52..d078162c29 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -151,7 +151,7 @@ class BasePresenceHandler(abc.ABC):
@abc.abstractmethod
async def user_syncing(
- self, user_id: str, affect_presence: bool
+ self, user_id: str, affect_presence: bool, presence_state: str
) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests
from the user.
@@ -165,6 +165,7 @@ class BasePresenceHandler(abc.ABC):
affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual
client that is being used by a user.
+ presence_state: The presence state indicated in the sync request
"""
@abc.abstractmethod
@@ -228,6 +229,11 @@ class BasePresenceHandler(abc.ABC):
return states
+ async def current_state_for_user(self, user_id: str) -> UserPresenceState:
+ """Get the current presence state for a user."""
+ res = await self.current_state_for_users([user_id])
+ return res[user_id]
+
@abc.abstractmethod
async def set_state(
self,
@@ -461,7 +467,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
self.send_user_sync(user_id, False, last_sync_ms)
async def user_syncing(
- self, user_id: str, affect_presence: bool
+ self, user_id: str, affect_presence: bool, presence_state: str
) -> ContextManager[None]:
"""Record that a user is syncing.
@@ -471,6 +477,17 @@ class WorkerPresenceHandler(BasePresenceHandler):
if not affect_presence or not self._presence_enabled:
return _NullContextManager()
+ prev_state = await self.current_state_for_user(user_id)
+ if prev_state != PresenceState.BUSY:
+ # We set state here but pass ignore_status_msg = True as we don't want to
+ # cause the status message to be cleared.
+ # Note that this causes last_active_ts to be incremented which is not
+ # what the spec wants: see comment in the BasePresenceHandler version
+ # of this function.
+ await self.set_state(
+ UserID.from_string(user_id), {"presence": presence_state}, True
+ )
+
curr_sync = self._user_to_num_current_syncs.get(user_id, 0)
self._user_to_num_current_syncs[user_id] = curr_sync + 1
@@ -942,7 +959,10 @@ class PresenceHandler(BasePresenceHandler):
await self._update_states([prev_state.copy_and_replace(**new_fields)])
async def user_syncing(
- self, user_id: str, affect_presence: bool = True
+ self,
+ user_id: str,
+ affect_presence: bool = True,
+ presence_state: str = PresenceState.ONLINE,
) -> ContextManager[None]:
"""Returns a context manager that should surround any stream requests
from the user.
@@ -956,6 +976,7 @@ class PresenceHandler(BasePresenceHandler):
affect_presence: If false this function will be a no-op.
Useful for streams that are not associated with an actual
client that is being used by a user.
+ presence_state: The presence state indicated in the sync request
"""
# Override if it should affect the user's presence, if presence is
# disabled.
@@ -967,9 +988,25 @@ class PresenceHandler(BasePresenceHandler):
self.user_to_num_current_syncs[user_id] = curr_sync + 1
prev_state = await self.current_state_for_user(user_id)
+
+ # If they're busy then they don't stop being busy just by syncing,
+ # so just update the last sync time.
+ if prev_state.state != PresenceState.BUSY:
+ # XXX: We call set_state separately here and just update the last_active_ts above.
+ # This keeps the logic as similar as possible between the worker and single
+ # process modes. Using set_state will actually cause last_active_ts to be
+ # always updated, which is not what the spec calls for, but Synapse has done
+ # this for... forever, I think.
+ await self.set_state(
+ UserID.from_string(user_id), {"presence": presence_state}, True
+ )
+ # Retrieve the new state for the logic below. This should come from the
+ # in-memory cache.
+ prev_state = await self.current_state_for_user(user_id)
+
+ # To keep the single process behaviour consistent with worker mode, run the
+ # same logic as `update_external_syncs_row`, even though it looks weird.
if prev_state.state == PresenceState.OFFLINE:
- # If they're currently offline then bring them online, otherwise
- # just update the last sync times.
await self._update_states(
[
prev_state.copy_and_replace(
@@ -979,6 +1016,10 @@ class PresenceHandler(BasePresenceHandler):
)
]
)
+ # otherwise, set the new presence state & update the last sync time,
+ # but don't update last_active_ts as this isn't an indication that
+ # they've been active (even though it's probably been updated by
+ # set_state above)
else:
await self._update_states(
[
@@ -1086,11 +1127,6 @@ class PresenceHandler(BasePresenceHandler):
)
self.external_process_last_updated_ms.pop(process_id, None)
- async def current_state_for_user(self, user_id: str) -> UserPresenceState:
- """Get the current presence state for a user."""
- res = await self.current_state_for_users([user_id])
- return res[user_id]
-
async def _persist_and_notify(self, states: List[UserPresenceState]) -> None:
"""Persist states in the database, poke the notifier and send to
interested remote servers
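The sync servlet change later in this diff passes the requested presence state into `user_syncing`, which now both tracks the in-flight sync and (unless the user is busy) applies that state. A minimal, self-contained sketch of the context-manager pattern involved; `SimplePresenceTracker` is an illustrative stand-in, not Synapse's handler:

```python
from contextlib import contextmanager
from typing import Dict, Iterator


class SimplePresenceTracker:
    """Toy stand-in: counts in-flight syncs per user, like user_syncing does."""

    def __init__(self) -> None:
        self._num_syncs: Dict[str, int] = {}

    @contextmanager
    def user_syncing(self, user_id: str, affect_presence: bool = True) -> Iterator[None]:
        if not affect_presence:
            yield
            return
        # Mark the user as syncing for the duration of the stream request.
        self._num_syncs[user_id] = self._num_syncs.get(user_id, 0) + 1
        try:
            yield
        finally:
            self._num_syncs[user_id] -= 1


tracker = SimplePresenceTracker()
with tracker.user_syncing("@alice:example.org"):
    assert tracker._num_syncs["@alice:example.org"] == 1
assert tracker._num_syncs["@alice:example.org"] == 0
```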
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6c8b17c420..5125126a80 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -661,16 +661,15 @@ class SyncHandler:
stream_position: point at which to get state
state_filter: The state filter used to fetch state from the database.
"""
- # FIXME this claims to get the state at a stream position, but
- # get_recent_events_for_room operates by topo ordering. This therefore
- # does not reliably give you the state at the given stream position.
- # (https://github.com/matrix-org/synapse/issues/3305)
- last_events, _ = await self.store.get_recent_events_for_room(
- room_id, end_token=stream_position.room_key, limit=1
+ # FIXME: This gets the state at the latest event before the stream ordering,
+ # which might not be the same as the "current state" of the room at the time
+ # of the stream token if there were multiple forward extremities at the time.
+ last_event = await self.store.get_last_event_in_room_before_stream_ordering(
+ room_id,
+ end_token=stream_position.room_key,
)
- if last_events:
- last_event = last_events[-1]
+ if last_event:
state = await self.get_state_after_event(
last_event, state_filter=state_filter or StateFilter.all()
)
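To see why switching from topological to stream ordering matters here, consider a room containing an MSC2716 historical event: it can have the highest depth while carrying a negative stream ordering (the scenario exercised by `test_sync_while_batch_importing` later in this diff). Illustrative values only:

```python
events = [
    {"event_id": "$live", "topological_ordering": 5, "stream_ordering": 42},
    {"event_id": "$historical", "topological_ordering": 9, "stream_ordering": -3},
]
token_stream = 50  # the sync token's stream position

# Old behaviour (roughly): the highest topological ordering wins.
by_topo = max(events, key=lambda e: e["topological_ordering"])
# New behaviour: the highest stream ordering at or below the token wins.
by_stream = max(
    (e for e in events if e["stream_ordering"] <= token_stream),
    key=lambda e: e["stream_ordering"],
)

assert by_topo["event_id"] == "$historical"  # the wrong event for "state at token"
assert by_stream["event_id"] == "$live"
```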
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 5097b3ca57..e686445955 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -704,6 +704,9 @@ class MatrixFederationHttpClient:
Returns:
A list of headers to be added as "Authorization:" headers
"""
+ if destination is None and destination_is is None:
+ raise ValueError("destination and destination_is cannot both be None!")
+
request: JsonDict = {
"method": method.decode("ascii"),
"uri": url_bytes.decode("ascii"),
@@ -726,8 +729,13 @@ class MatrixFederationHttpClient:
for key, sig in request["signatures"][self.server_name].items():
auth_headers.append(
(
- 'X-Matrix origin=%s,key="%s",sig="%s"'
- % (self.server_name, key, sig)
+ 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"'
+ % (
+ self.server_name,
+ key,
+ sig,
+ request.get("destination") or request["destination_is"],
+ )
).encode("ascii")
)
return auth_headers
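For reference, the shape of the header this produces. A hedged sketch of the formatting only; `format_x_matrix` is illustrative, and the signature itself comes from signing the request JSON as above:

```python
def format_x_matrix(origin: str, key_id: str, sig: str, destination: str) -> str:
    return 'X-Matrix origin=%s,key="%s",sig="%s",destination="%s"' % (
        origin,
        key_id,
        sig,
        destination,
    )


print(format_x_matrix("example.org", "ed25519:1", "SOMESIG", "matrix.org"))
# X-Matrix origin=example.org,key="ed25519:1",sig="SOMESIG",destination="matrix.org"
```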
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
deleted file mode 100644
index ec199a161d..0000000000
--- a/synapse/python_dependencies.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
-# Copyright 2020 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import itertools
-import logging
-from typing import Set
-
-logger = logging.getLogger(__name__)
-
-
-# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
-# installed. It is passed to setup() as install_requires in setup.py.
-#
-# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
-# of lists. The dict key is the optional dependency name and can be passed to
-# pip when installing. The list is a series of requirement specifiers[1] to be
-# installed when that optional dependency requirement is specified. It is passed
-# to setup() as extras_require in setup.py
-#
-# Note that these both represent runtime dependencies (and the versions
-# installed are checked at runtime).
-#
-# Also note that we replicate these constraints in the Synapse Dockerfile while
-# pre-installing dependencies. If these constraints are updated here, the same
-# change should be made in the Dockerfile.
-#
-# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
-
-REQUIREMENTS = [
- # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
- "jsonschema>=3.0.0",
- # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
- "frozendict>=1,!=2.1.2",
- "unpaddedbase64>=1.1.0",
- "canonicaljson>=1.4.0",
- # we use the type definitions added in signedjson 1.1.
- "signedjson>=1.1.0",
- "pynacl>=1.2.1",
- # validating SSL certs for IP addresses requires service_identity 18.1.
- "service_identity>=18.1.0",
- # Twisted 18.9 introduces some logger improvements that the structured
- # logger utilises
- "Twisted[tls]>=18.9.0",
- "treq>=15.1",
- # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
- "pyopenssl>=16.0.0",
- "pyyaml>=3.11",
- "pyasn1>=0.1.9",
- "pyasn1-modules>=0.0.7",
- "bcrypt>=3.1.0",
- "pillow>=5.4.0",
- "sortedcontainers>=1.4.4",
- "pymacaroons>=0.13.0",
- "msgpack>=0.5.2",
- "phonenumbers>=8.2.0",
- # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
- "prometheus_client>=0.4.0",
- # we use `order`, which arrived in attrs 19.2.0.
- # Note: 21.1.0 broke `/sync`, see #9936
- "attrs>=19.2.0,!=21.1.0",
- "netaddr>=0.7.18",
- # Jinja 2.x is incompatible with MarkupSafe>=2.1. To ensure that admins do not
- # end up with a broken installation, with recent MarkupSafe but old Jinja, we
- # add a lower bound to the Jinja2 dependency.
- "Jinja2>=3.0",
- "bleach>=1.4.3",
- # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
- "typing-extensions>=3.10.0",
- # We enforce that we have a `cryptography` version that bundles an `openssl`
- # with the latest security patches.
- "cryptography>=3.4.7",
- # ijson 3.1.4 fixes a bug with "." in property names
- "ijson>=3.1.4",
- "matrix-common~=1.1.0",
- # We need packaging.requirements.Requirement, added in 16.1.
- "packaging>=16.1",
- # At the time of writing, we only use functions from the version `importlib.metadata`
- # which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
- "importlib_metadata>=1.4 ; python_version < '3.8'",
-]
-
-CONDITIONAL_REQUIREMENTS = {
- "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
- "postgres": [
- # we use execute_values with the fetch param, which arrived in psycopg 2.8.
- "psycopg2>=2.8 ; platform_python_implementation != 'PyPy'",
- "psycopg2cffi>=2.8 ; platform_python_implementation == 'PyPy'",
- "psycopg2cffi-compat==1.1 ; platform_python_implementation == 'PyPy'",
- ],
- "saml2": [
- "pysaml2>=4.5.0",
- ],
- "oidc": ["authlib>=0.14.0"],
- # systemd-python is necessary for logging to the systemd journal via
- # `systemd.journal.JournalHandler`, as is documented in
- # `contrib/systemd/log_config.yaml`.
- "systemd": ["systemd-python>=231"],
- "url_preview": ["lxml>=4.2.0"],
- "sentry": ["sentry-sdk>=0.7.2"],
- "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
- "jwt": ["pyjwt>=1.6.4"],
- # hiredis is not a *strict* dependency, but it makes things much faster.
- # (if it is not installed, we fall back to slow code.)
- "redis": ["txredisapi>=1.4.7", "hiredis"],
- # Required to use experimental `caches.track_memory_usage` config option.
- "cache_memory": ["pympler"],
-}
-
-ALL_OPTIONAL_REQUIREMENTS: Set[str] = set()
-
-for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
- # Exclude systemd as it's a system-based requirement.
- # Exclude lint as it's a dev-based requirement.
- if name not in ["systemd"]:
- ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
-
-
-# ensure there are no double-quote characters in any of the deps (otherwise the
-# 'pip install' incantation in DependencyException will break)
-for dep in itertools.chain(
- REQUIREMENTS,
- *CONDITIONAL_REQUIREMENTS.values(),
-):
- if '"' in dep:
- raise Exception(
- "Dependency `%s` contains double-quote; use single-quotes instead" % (dep,)
- )
-
-
-def list_requirements():
- return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
-
-
-if __name__ == "__main__":
- import sys
-
- sys.stdout.writelines(req + "\n" for req in list_requirements())
diff --git a/synapse/res/templates/notif.html b/synapse/res/templates/notif.html
index 0aaef97df8..7d86681fed 100644
--- a/synapse/res/templates/notif.html
+++ b/synapse/res/templates/notif.html
@@ -30,7 +30,7 @@
{%- elif message.msgtype == "m.notice" %}
{{ message.body_text_html }}
{%- elif message.msgtype == "m.image" and message.image_url %}
- <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
+ <img src="{{ message.image_url|mxc_to_http(640, 480, 'scale') }}" />
{%- elif message.msgtype == "m.file" %}
<span class="filename">{{ message.body_text_plain }}</span>
{%- else %}
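The template fix quotes 'scale' so Jinja passes it as a string literal; unquoted, `scale` is looked up as an (undefined) template variable. A small reproduction with a stub filter standing in for the real `mxc_to_http`:

```python
from jinja2 import Environment

env = Environment()
# Stub with the same arity as mxc_to_http(width, height, method).
env.filters["mxc_to_http"] = lambda mxc, w, h, method: (
    f"https://hs.example/_matrix/media/r0/thumbnail/{mxc[len('mxc://'):]}"
    f"?width={w}&height={h}&method={method}"
)

tmpl = env.from_string("{{ url|mxc_to_http(640, 480, 'scale') }}")
print(tmpl.render(url="mxc://example.org/abc123"))
# https://hs.example/_matrix/media/r0/thumbnail/example.org/abc123?width=640&height=480&method=scale
```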
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index c9d44c5964..4a4dbe75de 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -342,6 +342,15 @@ class LoginRestServlet(RestServlet):
user_id = canonical_uid
device_id = login_submission.get("device_id")
+
+ # If device_id is present, check that it is no longer than a reasonable 512 characters
+ if device_id and len(device_id) > 512:
+ raise LoginError(
+ 400,
+ "device_id cannot be longer than 512 characters.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
initial_display_name = login_submission.get("initial_device_display_name")
(
device_id,
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 47e152c8cc..906fe09e97 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -21,6 +21,7 @@ from urllib import parse as urlparse
from twisted.web.server import Request
+from synapse import event_auth
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
@@ -29,6 +30,7 @@ from synapse.api.errors import (
MissingClientTokenError,
ShadowBanError,
SynapseError,
+ UnredactedContentDeletedError,
)
from synapse.api.filtering import Filter
from synapse.events.utils import format_event_for_client_v2
@@ -643,18 +645,55 @@ class RoomEventServlet(RestServlet):
super().__init__()
self.clock = hs.get_clock()
self._store = hs.get_datastores().main
+ self._state = hs.get_state_handler()
self.event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._relations_handler = hs.get_relations_handler()
self.auth = hs.get_auth()
+ self.content_keep_ms = hs.config.server.redaction_retention_period
+ self.msc2815_enabled = hs.config.experimental.msc2815_enabled
async def on_GET(
self, request: SynapseRequest, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
+
+ include_unredacted_content = self.msc2815_enabled and (
+ parse_string(
+ request,
+ "fi.mau.msc2815.include_unredacted_content",
+ allowed_values=("true", "false"),
+ )
+ == "true"
+ )
+ if include_unredacted_content and not await self.auth.is_server_admin(
+ requester.user
+ ):
+ power_level_event = await self._state.get_current_state(
+ room_id, EventTypes.PowerLevels, ""
+ )
+
+ auth_events = {}
+ if power_level_event:
+ auth_events[(EventTypes.PowerLevels, "")] = power_level_event
+
+ redact_level = event_auth.get_named_level(auth_events, "redact", 50)
+ user_level = event_auth.get_user_power_level(
+ requester.user.to_string(), auth_events
+ )
+ if user_level < redact_level:
+ raise SynapseError(
+ 403,
+ "You don't have permission to view redacted events in this room.",
+ errcode=Codes.FORBIDDEN,
+ )
+
try:
event = await self.event_handler.get_event(
- requester.user, room_id, event_id
+ requester.user,
+ room_id,
+ event_id,
+ show_redacted=include_unredacted_content,
)
except AuthError:
# This endpoint is supposed to return a 404 when the requester does
@@ -663,14 +702,21 @@ class RoomEventServlet(RestServlet):
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
if event:
+ if include_unredacted_content and await self._store.have_censored_event(
+ event_id
+ ):
+ raise UnredactedContentDeletedError(self.content_keep_ms)
+
# Ensure there are bundled aggregations available.
aggregations = await self._relations_handler.get_bundled_aggregations(
[event], requester.user.to_string()
)
time_now = self.clock.time_msec()
+ # per MSC2676, /rooms/{roomId}/event/{eventId} should return the
+ # *original* event, rather than the edited version
event_dict = self._event_serializer.serialize_event(
- event, time_now, bundle_aggregations=aggregations
+ event, time_now, bundle_aggregations=aggregations, apply_edits=False
)
return 200, event_dict
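A hypothetical client request exercising the MSC2815 parameter added above; all values below are placeholders. Sent with a sufficiently privileged access token, it returns the event with its unredacted content, or an error once the content has been censored:

```python
import urllib.parse

base_url = "https://homeserver.example"  # placeholder
room_id = urllib.parse.quote("!room:example.org")
event_id = urllib.parse.quote("$redactedevent")
query = urllib.parse.urlencode(
    {"fi.mau.msc2815.include_unredacted_content": "true"}
)

url = f"{base_url}/_matrix/client/v3/rooms/{room_id}/event/{event_id}?{query}"
print(url)
```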
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 2e25e8638b..e8772f86e7 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -180,13 +180,10 @@ class SyncRestServlet(RestServlet):
affect_presence = set_presence != PresenceState.OFFLINE
- if affect_presence:
- await self.presence_handler.set_state(
- user, {"presence": set_presence}, True
- )
-
context = await self.presence_handler.user_syncing(
- user.to_string(), affect_presence=affect_presence
+ user.to_string(),
+ affect_presence=affect_presence,
+ presence_state=set_presence,
)
with context:
sync_result = await self.sync_handler.wait_for_sync_for_user(
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 9a65aa4843..bfc1d4ee08 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -86,7 +86,7 @@ class VersionsRestServlet(RestServlet):
# Implements additional endpoints as described in MSC2432
"org.matrix.msc2432": True,
# Implements additional endpoints as described in MSC2666
- "uk.half-shot.msc2666": True,
+ "uk.half-shot.msc2666.mutual_rooms": True,
# Whether new rooms will be set to encrypted or not (based on presets).
"io.element.e2ee_forced.public": self.e2ee_forced_public,
"io.element.e2ee_forced.private": self.e2ee_forced_private,
@@ -100,8 +100,9 @@ class VersionsRestServlet(RestServlet):
# Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440.
- "org.matrix.msc3440": self.config.experimental.msc3440_enabled,
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
+ # Allows moderators to fetch redacted event content as described in MSC2815
+ "fi.mau.msc2815": self.config.experimental.msc2815_enabled,
},
},
)
diff --git a/synapse/server.py b/synapse/server.py
index 380369db92..37c72bd83a 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -758,7 +758,7 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_event_client_serializer(self) -> EventClientSerializer:
- return EventClientSerializer(self)
+ return EventClientSerializer()
@cache_in_self
def get_password_policy_handler(self) -> PasswordPolicyHandler:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index dc8009b23d..318e4df376 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -1582,7 +1582,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
self,
user_id: str,
device_ids: Collection[str],
- hosts: Optional[Collection[str]],
room_ids: Collection[str],
) -> Optional[int]:
"""Persist that a user's devices have been updated, and which hosts
@@ -1592,9 +1591,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id: The ID of the user whose device changed.
device_ids: The IDs of any changed devices. If empty, this function will
return None.
- hosts: The remote destinations that should be notified of the change. If
- None then the set of hosts have *not* been calculated, and will be
- calculated later by a background task.
room_ids: The rooms that the user is in
Returns:
@@ -1606,14 +1602,12 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
context = get_active_span_text_map()
- def add_device_changes_txn(
- txn, stream_ids_for_device_change, stream_ids_for_outbound_pokes
- ):
+ def add_device_changes_txn(txn, stream_ids):
self._add_device_change_to_stream_txn(
txn,
user_id,
device_ids,
- stream_ids_for_device_change,
+ stream_ids,
)
self._add_device_outbound_room_poke_txn(
@@ -1621,43 +1615,17 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id,
device_ids,
room_ids,
- stream_ids_for_device_change,
- context,
- hosts_have_been_calculated=hosts is not None,
- )
-
- # If the set of hosts to send to has not been calculated yet (and so
- # `hosts` is None) or there are no `hosts` to send to, then skip
- # trying to persist them to the DB.
- if not hosts:
- return
-
- self._add_device_outbound_poke_to_stream_txn(
- txn,
- user_id,
- device_ids,
- hosts,
- stream_ids_for_outbound_pokes,
+ stream_ids,
context,
)
- # `device_lists_stream` wants a stream ID per device update.
- num_stream_ids = len(device_ids)
-
- if hosts:
- # `device_lists_outbound_pokes` wants a different stream ID for
- # each row, which is a row per host per device update.
- num_stream_ids += len(hosts) * len(device_ids)
-
- async with self._device_list_id_gen.get_next_mult(num_stream_ids) as stream_ids:
- stream_ids_for_device_change = stream_ids[: len(device_ids)]
- stream_ids_for_outbound_pokes = stream_ids[len(device_ids) :]
-
+ async with self._device_list_id_gen.get_next_mult(
+ len(device_ids)
+ ) as stream_ids:
await self.db_pool.runInteraction(
"add_device_change_to_stream",
add_device_changes_txn,
- stream_ids_for_device_change,
- stream_ids_for_outbound_pokes,
+ stream_ids,
)
return stream_ids[-1]
@@ -1735,7 +1703,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
next(stream_id_iterator),
user_id,
device_id,
- False,
+ not self.hs.is_mine_id(
+ user_id
+ ), # We only need to send out updates for *our* users
now,
encoded_context if whitelisted_homeserver(destination) else "{}",
)
@@ -1752,19 +1722,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
room_ids: Collection[str],
stream_ids: List[str],
context: Dict[str, str],
- hosts_have_been_calculated: bool,
) -> None:
- """Record the user in the room has updated their device.
-
- Args:
- hosts_have_been_calculated: True if `device_lists_outbound_pokes`
- has been updated already with the updates.
- """
-
- # We only need to convert to outbound pokes if they are our user.
- converted_to_destinations = (
- hosts_have_been_calculated or not self.hs.is_mine_id(user_id)
- )
+ """Record the user in the room has updated their device."""
encoded_context = json_encoder.encode(context)
@@ -1789,7 +1748,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
device_id,
room_id,
stream_id,
- converted_to_destinations,
+ False,
encoded_context,
)
for room_id in room_ids
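The allocation change boils down to one stream ID per changed device, with no extra per-(host, device) IDs, since outbound pokes are now calculated later by a background task. A toy sketch of the bookkeeping; `allocate_stream_ids` stands in for the real ID generator:

```python
from itertools import count
from typing import Iterator, List

_ids: Iterator[int] = count(1)


def allocate_stream_ids(n: int) -> List[int]:
    # Stand-in for `self._device_list_id_gen.get_next_mult(n)`.
    return [next(_ids) for _ in range(n)]


device_ids = ["D1", "D2", "D3"]
stream_ids = allocate_stream_ids(len(device_ids))
print(list(zip(device_ids, stream_ids)))  # [('D1', 1), ('D2', 2), ('D3', 3)]
print(stream_ids[-1])  # 3 -- the value the method returns
```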
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 3fcd5f5b99..2a1e567ce0 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -963,6 +963,21 @@ class PersistEventsStore:
values=to_insert,
)
+ async def update_current_state(
+ self,
+ room_id: str,
+ state_delta: DeltaState,
+ stream_id: int,
+ ) -> None:
+ """Update the current state stored in the datatabase for the given room"""
+
+ await self.db_pool.runInteraction(
+ "update_current_state",
+ self._update_current_state_txn,
+ state_delta_by_room={room_id: state_delta},
+ stream_id=stream_id,
+ )
+
def _update_current_state_txn(
self,
txn: LoggingTransaction,
@@ -1819,10 +1834,7 @@ class PersistEventsStore:
if rel_type == RelationTypes.REPLACE:
txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))
- if (
- rel_type == RelationTypes.THREAD
- or rel_type == RelationTypes.UNSTABLE_THREAD
- ):
+ if rel_type == RelationTypes.THREAD:
txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,))
# It should be safe to only invalidate the cache if the user has not
# previously participated in the thread, but that's difficult (and
diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py
index 68901b4335..f851bff604 100644
--- a/synapse/storage/databases/main/events_forward_extremities.py
+++ b/synapse/storage/databases/main/events_forward_extremities.py
@@ -66,13 +66,15 @@ class EventForwardExtremitiesStore(
"""
txn.execute(sql, (event_id, room_id))
+
+ deleted_count = txn.rowcount
logger.info(
"Deleted %s extra forward extremities for room %s",
- txn.rowcount,
+ deleted_count,
room_id,
)
- if txn.rowcount > 0:
+ if deleted_count > 0:
# Invalidate the cache
self._invalidate_cache_and_stream(
txn,
@@ -80,7 +82,7 @@ class EventForwardExtremitiesStore(
(room_id,),
)
- return txn.rowcount
+ return deleted_count
return await self.db_pool.runInteraction(
"delete_forward_extremities_for_room",
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index a60e3f4fdd..60876204bd 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -303,6 +303,24 @@ class EventsWorkerStore(SQLBaseStore):
desc="get_received_ts",
)
+ async def have_censored_event(self, event_id: str) -> bool:
+ """Check if an event has been censored, i.e. if the content of the event has been erased
+ from the database due to a redaction.
+
+ Args:
+ event_id: The event ID that was redacted.
+
+ Returns:
+ True if the event has been censored, False otherwise.
+ """
+ censored_redactions_list = await self.db_pool.simple_select_onecol(
+ table="redactions",
+ keyvalues={"redacts": event_id},
+ retcol="have_censored",
+ desc="get_have_censored",
+ )
+ return any(censored_redactions_list)
+
# Inform mypy that if allow_none is False (the default) then get_event
# always returns an EventBase.
@overload
@@ -1979,3 +1997,27 @@ class EventsWorkerStore(SQLBaseStore):
desc="is_partial_state_event",
)
return result is not None
+
+ async def get_partial_state_events_batch(self, room_id: str) -> List[str]:
+ """Get a list of events in the given room that have partial state"""
+ return await self.db_pool.runInteraction(
+ "get_partial_state_events_batch",
+ self._get_partial_state_events_batch_txn,
+ room_id,
+ )
+
+ @staticmethod
+ def _get_partial_state_events_batch_txn(
+ txn: LoggingTransaction, room_id: str
+ ) -> List[str]:
+ txn.execute(
+ """
+ SELECT event_id FROM partial_state_events AS pse
+ JOIN events USING (event_id)
+ WHERE pse.room_id = ?
+ ORDER BY events.stream_ordering
+ LIMIT 100
+ """,
+ (room_id,),
+ )
+ return [row[0] for row in txn]
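A hedged sketch of the loop this LIMIT 100 query is built for: repeatedly fetch the oldest partial-state events (by stream ordering) and process them until none remain. `store` and `process` are assumed stand-ins, not Synapse APIs:

```python
async def drain_partial_state_events(store, room_id: str, process) -> None:
    while True:
        batch = await store.get_partial_state_events_batch(room_id)
        if not batch:
            break  # table drained for this room
        for event_id in batch:
            await process(event_id)
```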
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 407158ceee..a5c31f6787 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -14,7 +14,6 @@
import logging
from typing import (
- TYPE_CHECKING,
Collection,
Dict,
FrozenSet,
@@ -32,20 +31,12 @@ import attr
from synapse.api.constants import RelationTypes
from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import (
- DatabasePool,
- LoggingDatabaseConnection,
- LoggingTransaction,
- make_in_list_sql_clause,
-)
+from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
from synapse.storage.databases.main.stream import generate_pagination_where_clause
from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict, RoomStreamToken, StreamToken
from synapse.util.caches.descriptors import cached, cachedList
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
logger = logging.getLogger(__name__)
@@ -63,16 +54,6 @@ class _RelatedEvent:
class RelationsWorkerStore(SQLBaseStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
-
- self._msc3440_enabled = hs.config.experimental.msc3440_enabled
-
@cached(uncached_args=("event",), tree=True)
async def get_relations_for_event(
self,
@@ -497,7 +478,7 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id
WHERE
%s
- AND %s
+ AND relation_type = ?
ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC
"""
else:
@@ -512,22 +493,16 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id
WHERE
%s
- AND %s
+ AND relation_type = ?
ORDER BY child.topological_ordering DESC, child.stream_ordering DESC
"""
clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", event_ids
)
+ args.append(RelationTypes.THREAD)
- if self._msc3440_enabled:
- relations_clause = "(relation_type = ? OR relation_type = ?)"
- args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
- else:
- relations_clause = "relation_type = ?"
- args.append(RelationTypes.THREAD)
-
- txn.execute(sql % (clause, relations_clause), args)
+ txn.execute(sql % (clause,), args)
latest_event_ids = {}
for parent_event_id, child_event_id in txn:
# Only consider the latest threaded reply (by topological ordering).
@@ -547,7 +522,7 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id
WHERE
%s
- AND %s
+ AND relation_type = ?
GROUP BY parent.event_id
"""
@@ -556,15 +531,9 @@ class RelationsWorkerStore(SQLBaseStore):
clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", latest_event_ids.keys()
)
+ args.append(RelationTypes.THREAD)
- if self._msc3440_enabled:
- relations_clause = "(relation_type = ? OR relation_type = ?)"
- args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
- else:
- relations_clause = "relation_type = ?"
- args.append(RelationTypes.THREAD)
-
- txn.execute(sql % (clause, relations_clause), args)
+ txn.execute(sql % (clause,), args)
counts = dict(cast(List[Tuple[str, int]], txn.fetchall()))
return counts, latest_event_ids
@@ -622,7 +591,7 @@ class RelationsWorkerStore(SQLBaseStore):
parent.event_id = relates_to_id
AND parent.room_id = child.room_id
WHERE
- %s
+ relation_type = ?
AND %s
AND %s
GROUP BY parent.event_id, child.sender
@@ -638,16 +607,9 @@ class RelationsWorkerStore(SQLBaseStore):
txn.database_engine, "relates_to_id", event_ids
)
- if self._msc3440_enabled:
- relations_clause = "(relation_type = ? OR relation_type = ?)"
- relations_args = [RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD]
- else:
- relations_clause = "relation_type = ?"
- relations_args = [RelationTypes.THREAD]
-
txn.execute(
- sql % (users_sql, events_clause, relations_clause),
- users_args + events_args + relations_args,
+ sql % (users_sql, events_clause),
+ [RelationTypes.THREAD] + users_args + events_args,
)
return {(row[0], row[1]): row[2] for row in txn}
@@ -677,7 +639,7 @@ class RelationsWorkerStore(SQLBaseStore):
user participated in that event's thread, otherwise false.
"""
- def _get_thread_summary_txn(txn: LoggingTransaction) -> Set[str]:
+ def _get_threads_participated_txn(txn: LoggingTransaction) -> Set[str]:
# Fetch whether the requester has participated or not.
sql = """
SELECT DISTINCT relates_to_id
@@ -688,28 +650,20 @@ class RelationsWorkerStore(SQLBaseStore):
AND parent.room_id = child.room_id
WHERE
%s
- AND %s
+ AND relation_type = ?
AND child.sender = ?
"""
clause, args = make_in_list_sql_clause(
txn.database_engine, "relates_to_id", event_ids
)
+ args.extend([RelationTypes.THREAD, user_id])
- if self._msc3440_enabled:
- relations_clause = "(relation_type = ? OR relation_type = ?)"
- args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD))
- else:
- relations_clause = "relation_type = ?"
- args.append(RelationTypes.THREAD)
-
- args.append(user_id)
-
- txn.execute(sql % (clause, relations_clause), args)
+ txn.execute(sql % (clause,), args)
return {row[0] for row in txn.fetchall()}
participated_threads = await self.db_pool.runInteraction(
- "get_thread_summary", _get_thread_summary_txn
+ "get_threads_participated", _get_threads_participated_txn
)
return {event_id: event_id in participated_threads for event_id in event_ids}
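With the MSC3440 unstable identifier gone, the queries above collapse to a fixed `relation_type = ?`, with `RelationTypes.THREAD` appended after the IN-list arguments. A simplified sketch of that parameter bookkeeping; `in_list_clause` is a stand-in for Synapse's `make_in_list_sql_clause`:

```python
from typing import List, Tuple

THREAD = "m.thread"  # the value of RelationTypes.THREAD


def in_list_clause(column: str, values: List[str]) -> Tuple[str, List[str]]:
    placeholders = ", ".join("?" for _ in values)
    return f"{column} IN ({placeholders})", list(values)


clause, args = in_list_clause("relates_to_id", ["$a", "$b"])
args.append(THREAD)
sql = f"SELECT relates_to_id FROM event_relations WHERE {clause} AND relation_type = ?"
print(sql)   # ... WHERE relates_to_id IN (?, ?) AND relation_type = ?
print(args)  # ['$a', '$b', 'm.thread']
```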
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 18b1acd9e1..87e9482c60 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1077,6 +1077,37 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
get_rooms_for_retention_period_in_range_txn,
)
+ async def clear_partial_state_room(self, room_id: str) -> bool:
+ # this can race with incoming events, so we watch out for FK errors.
+ # TODO(faster_joins): this still doesn't completely fix the race, since the persist process
+ # is not atomic. I fear we need an application-level lock.
+ try:
+ await self.db_pool.runInteraction(
+ "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
+ )
+ return True
+ except self.db_pool.engine.module.DatabaseError as e:
+ # TODO(faster_joins): how do we distinguish between FK errors and other errors?
+ logger.warning(
+ "Exception while clearing lazy partial-state-room %s, retrying: %s",
+ room_id,
+ e,
+ )
+ return False
+
+ @staticmethod
+ def _clear_partial_state_room_txn(txn: LoggingTransaction, room_id: str) -> None:
+ DatabasePool.simple_delete_txn(
+ txn,
+ table="partial_state_rooms_servers",
+ keyvalues={"room_id": room_id},
+ )
+ DatabasePool.simple_delete_one_txn(
+ txn,
+ table="partial_state_rooms",
+ keyvalues={"room_id": room_id},
+ )
+
class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
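The boolean return of `clear_partial_state_room` encodes a retry contract: False means the delete raced a concurrent insert and should be attempted again later. A hedged sketch of a caller honouring that contract; `store` and the backoff interval are assumptions:

```python
import asyncio


async def clear_when_quiescent(store, room_id: str) -> None:
    while not await store.clear_partial_state_room(room_id):
        # Lost the race with an incoming event (FK error); back off and retry.
        await asyncio.sleep(0.5)
```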
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index ecdc1fdc4c..7a1b013fa3 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
+from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -129,7 +130,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)
if room_version is None:
- raise NotFoundError("Could not room_version for %s" % (room_id,))
+ raise NotFoundError("Could not find room_version for %s" % (room_id,))
return room_version
@@ -354,6 +355,53 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
return {row["state_group"] for row in rows}
+ async def update_state_for_partial_state_event(
+ self,
+ event: EventBase,
+ context: EventContext,
+ ) -> None:
+ """Update the state group for a partial state event"""
+ await self.db_pool.runInteraction(
+ "update_state_for_partial_state_event",
+ self._update_state_for_partial_state_event_txn,
+ event,
+ context,
+ )
+
+ def _update_state_for_partial_state_event_txn(
+ self,
+ txn,
+ event: EventBase,
+ context: EventContext,
+ ):
+ # we shouldn't have any outliers here
+ assert not event.internal_metadata.is_outlier()
+
+ # anything that was rejected should have the same state as its
+ # predecessor.
+ if context.rejected:
+ assert context.state_group == context.state_group_before_event
+
+ self.db_pool.simple_update_txn(
+ txn,
+ table="event_to_state_groups",
+ keyvalues={"event_id": event.event_id},
+ updatevalues={"state_group": context.state_group},
+ )
+
+ self.db_pool.simple_delete_one_txn(
+ txn,
+ table="partial_state_events",
+ keyvalues={"event_id": event.event_id},
+ )
+
+ # TODO(faster_joins): need to do something about workers here
+ txn.call_after(
+ self._get_state_group_for_event.prefill,
+ (event.event_id,),
+ context.state_group,
+ )
+
class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 6d45a8a9f6..793e906630 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -758,6 +758,32 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"get_room_event_before_stream_ordering", _f
)
+ async def get_last_event_in_room_before_stream_ordering(
+ self,
+ room_id: str,
+ end_token: RoomStreamToken,
+ ) -> Optional[EventBase]:
+ """Returns the last event in a room at or before a stream ordering
+
+ Args:
+ room_id
+ end_token: The token used to stream from
+
+ Returns:
+ The most recent event.
+ """
+
+ last_row = await self.get_room_event_before_stream_ordering(
+ room_id=room_id,
+ stream_ordering=end_token.stream,
+ )
+ if last_row:
+ _, _, event_id = last_row
+ event = await self.get_event(event_id, get_prev_content=True)
+ return event
+
+ return None
+
async def get_current_room_stream_token_for_room_id(
self, room_id: Optional[str] = None
) -> RoomStreamToken:
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index b402922817..e496ba7bed 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -376,6 +376,62 @@ class EventsPersistenceStorage:
pos = PersistedEventPosition(self._instance_name, event_stream_id)
return event, pos, self.main_store.get_room_max_token()
+ async def update_current_state(self, room_id: str) -> None:
+ """Recalculate the current state for a room, and persist it"""
+ state = await self._calculate_current_state(room_id)
+ delta = await self._calculate_state_delta(room_id, state)
+
+ # TODO(faster_joins): get a real stream ordering, to make this work correctly
+ # across workers.
+ #
+ # TODO(faster_joins): this can race against event persistence, in which case we
+ # will end up with incorrect state. Perhaps we should make this a job we
+ # farm out to the event persister, somehow.
+ stream_id = self.main_store.get_room_max_stream_ordering()
+ await self.persist_events_store.update_current_state(room_id, delta, stream_id)
+
+ async def _calculate_current_state(self, room_id: str) -> StateMap[str]:
+ """Calculate the current state of a room, based on the forward extremities
+
+ Args:
+ room_id: room for which to calculate current state
+
+ Returns:
+ map from (type, state_key) to event id for the current state in the room
+ """
+ latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id)
+ state_groups = set(
+ (
+ await self.main_store._get_state_group_for_events(latest_event_ids)
+ ).values()
+ )
+
+ state_maps_by_state_group = await self.state_store._get_state_for_groups(
+ state_groups
+ )
+
+ if len(state_groups) == 1:
+ # If there is only one state group, then we know what the current
+ # state is.
+ return state_maps_by_state_group[state_groups.pop()]
+
+ # Ok, we need to defer to the state handler to resolve our state sets.
+ logger.debug("calling resolve_state_groups from preserve_events")
+
+ # Avoid a circular import.
+ from synapse.state import StateResolutionStore
+
+ room_version = await self.main_store.get_room_version_id(room_id)
+ res = await self._state_resolution_handler.resolve_state_groups(
+ room_id,
+ room_version,
+ state_maps_by_state_group,
+ event_map=None,
+ state_res_store=StateResolutionStore(self.main_store),
+ )
+
+ return res.state
+
async def _persist_event_batch(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
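The control flow of `_calculate_current_state` in miniature: a single state group across the forward extremities short-circuits, while multiple groups require state resolution. The sketch below substitutes a naive merge for real state resolution, purely to show the branch structure:

```python
from typing import Dict, Set, Tuple

StateMap = Dict[Tuple[str, str], str]  # (type, state_key) -> event_id


def toy_current_state(state_maps_by_group: Dict[int, StateMap]) -> StateMap:
    groups: Set[int] = set(state_maps_by_group)
    if len(groups) == 1:
        # One state group: that *is* the current state.
        return state_maps_by_group[groups.pop()]
    # Several groups: the real code runs full state resolution here;
    # this toy version just merges, which is NOT conflict-correct.
    resolved: StateMap = {}
    for group in sorted(groups):
        resolved.update(state_maps_by_group[group])
    return resolved


one = {("m.room.name", ""): "$a"}
two = {("m.room.name", ""): "$b", ("m.room.topic", ""): "$c"}
print(toy_current_state({1: one}))        # single-group short-circuit
print(toy_current_state({1: one, 2: two}))
```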
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 151f2aa9bb..871d4ace12 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -66,9 +66,9 @@ Changes in SCHEMA_VERSION = 69:
SCHEMA_COMPAT_VERSION = (
- # we now have `state_key` columns in both `events` and `state_events`, so
- # now incompatible with synapses wth SCHEMA_VERSION < 66.
- 66
+ # We now assume that `device_lists_changes_in_room` has been filled out for
+ # recent device_list_updates.
+ 69
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/sytest-blacklist b/sytest-blacklist
index 57e603a4a6..d5fa36cec7 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -21,10 +21,6 @@ Newly created users see their own presence in /initialSync (SYT-34)
# Blacklisted due to https://github.com/matrix-org/synapse/issues/1396
Should reject keys claiming to belong to a different user
-# Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing
-# this requirement from the spec
-Inbound federation of state requires event_id as a mandatory paramater
-
# Blacklisted until MSC2753 is implemented
Local users can peek into world_readable rooms by room ID
We can't peek into rooms with shared history_visibility
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 8c3354ce3c..985d6e397d 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -481,9 +481,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
# events). This is a bit cheeky, but tests the logic of _check_event_relations.
# Filter for a particular sender.
- definition = {
- "io.element.relation_senders": ["@foo:bar"],
- }
+ definition = {"related_by_senders": ["@foo:bar"]}
async def events_have_relations(*args, **kwargs):
return ["$with_relation"]
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index a6e91956af..91f982518e 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -14,7 +14,6 @@
from typing import Optional
from unittest.mock import Mock
-from parameterized import parameterized_class
from signedjson import key, sign
from signedjson.types import BaseKey, SigningKey
@@ -155,12 +154,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
)
-@parameterized_class(
- [
- {"enable_room_poke_code_path": False},
- {"enable_room_poke_code_path": True},
- ]
-)
class FederationSenderDevicesTestCases(HomeserverTestCase):
servlets = [
admin.register_servlets,
@@ -169,13 +162,14 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
return self.setup_test_homeserver(
- federation_transport_client=Mock(spec=["send_transaction"]),
+ federation_transport_client=Mock(
+ spec=["send_transaction", "query_user_devices"]
+ ),
)
def default_config(self):
c = super().default_config()
c["send_federation"] = True
- c["use_new_device_lists_changes_in_room"] = self.enable_room_poke_code_path
return c
def prepare(self, reactor, clock, hs):
@@ -226,6 +220,45 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
self.assertEqual(len(self.edus), 1)
self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id)
+ def test_dont_send_device_updates_for_remote_users(self):
+ """Check that we don't send device updates for remote users"""
+
+ # Send the server a device list EDU for the other user, this will cause
+ # it to try and resync the device lists.
+ self.hs.get_federation_transport_client().query_user_devices.return_value = (
+ defer.succeed(
+ {
+ "stream_id": "1",
+ "user_id": "@user2:host2",
+ "devices": [{"device_id": "D1"}],
+ }
+ )
+ )
+
+ self.get_success(
+ self.hs.get_device_handler().device_list_updater.incoming_device_list_update(
+ "host2",
+ {
+ "user_id": "@user2:host2",
+ "device_id": "D1",
+ "stream_id": "1",
+ "prev_ids": [],
+ },
+ )
+ )
+
+ self.reactor.advance(1)
+
+ # We shouldn't see an EDU for that update
+ self.assertEqual(self.edus, [])
+
+ # Check that we did successfully process the inbound EDU (otherwise this
+ # test would pass if we failed to process the EDU)
+ devices = self.get_success(
+ self.hs.get_datastores().main.get_cached_devices_for_user("@user2:host2")
+ )
+ self.assertIn("D1", devices)
+
def test_upload_signatures(self):
"""Uploading signatures on some devices should produce updates for that user"""
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 30e7e5093a..b19365b81a 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -104,58 +104,21 @@ class ServerACLsTestCase(unittest.TestCase):
class StateQueryTests(unittest.FederatingHomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
- def test_without_event_id(self):
- """
- Querying v1/state/<room_id> without an event ID will return the current
- known state.
- """
- u1 = self.register_user("u1", "pass")
- u1_token = self.login("u1", "pass")
-
- room_1 = self.helper.create_room_as(u1, tok=u1_token)
- self.inject_room_member(room_1, "@user:other.example.com", "join")
-
- channel = self.make_signed_federation_request(
- "GET", "/_matrix/federation/v1/state/%s" % (room_1,)
- )
- self.assertEqual(200, channel.code, channel.result)
-
- self.assertEqual(
- channel.json_body["room_version"],
- self.hs.config.server.default_room_version.identifier,
- )
-
- members = set(
- map(
- lambda x: x["state_key"],
- filter(
- lambda x: x["type"] == "m.room.member", channel.json_body["pdus"]
- ),
- )
- )
-
- self.assertEqual(members, {"@user:other.example.com", u1})
- self.assertEqual(len(channel.json_body["pdus"]), 6)
-
def test_needs_to_be_in_room(self):
- """
- Querying v1/state/<room_id> requires the server
- be in the room to provide data.
- """
+ """/v1/state/<room_id> requires the server to be in the room"""
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
room_1 = self.helper.create_room_as(u1, tok=u1_token)
channel = self.make_signed_federation_request(
- "GET", "/_matrix/federation/v1/state/%s" % (room_1,)
+ "GET", "/_matrix/federation/v1/state/%s?event_id=xyz" % (room_1,)
)
self.assertEqual(403, channel.code, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index b2ed9cbe37..c96dc6caf2 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -657,6 +657,85 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase):
# Mark user as online and `status_msg = None`
self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None)
+ def test_set_presence_from_syncing_not_set(self):
+ """Test that presence is not set by syncing if affect_presence is false"""
+ user_id = "@test:server"
+ status_msg = "I'm here!"
+
+ self._set_presencestate_with_status_msg(
+ user_id, PresenceState.UNAVAILABLE, status_msg
+ )
+
+ self.get_success(
+ self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE)
+ )
+
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ # we should still be unavailable
+ self.assertEqual(state.state, PresenceState.UNAVAILABLE)
+ # and status message should still be the same
+ self.assertEqual(state.status_msg, status_msg)
+
+ def test_set_presence_from_syncing_is_set(self):
+ """Test that presence is set by syncing if affect_presence is true"""
+ user_id = "@test:server"
+ status_msg = "I'm here!"
+
+ self._set_presencestate_with_status_msg(
+ user_id, PresenceState.UNAVAILABLE, status_msg
+ )
+
+ self.get_success(
+ self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
+ )
+
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ # we should now be online
+ self.assertEqual(state.state, PresenceState.ONLINE)
+
+ def test_set_presence_from_syncing_keeps_status(self):
+ """Test that presence set by syncing retains status message"""
+ user_id = "@test:server"
+ status_msg = "I'm here!"
+
+ self._set_presencestate_with_status_msg(
+ user_id, PresenceState.UNAVAILABLE, status_msg
+ )
+
+ self.get_success(
+ self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
+ )
+
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ # our status message should be the same as it was before
+ self.assertEqual(state.status_msg, status_msg)
+
+ def test_set_presence_from_syncing_keeps_busy(self):
+ """Test that presence set by syncing doesn't affect busy status"""
+ # Busy presence isn't enabled by default, so turn it on for this test.
+ self.presence_handler._busy_presence_enabled = True
+
+ user_id = "@test:server"
+ status_msg = "I'm busy!"
+
+ self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg)
+
+ self.get_success(
+ self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
+ )
+
+ state = self.get_success(
+ self.presence_handler.get_state(UserID.from_string(user_id))
+ )
+ # we should still be busy
+ self.assertEqual(state.state, PresenceState.BUSY)
+
def _set_presencestate_with_status_msg(
self, user_id: str, state: str, status_msg: Optional[str]
):
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 849d00ab4d..40571b753a 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -63,6 +63,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
self.other_user = self.register_user("user", "pass")
self.other_user_token = self.login("user", "pass")
+ @unittest.override_config({"experimental_features": {"groups_enabled": True}})
def test_delete_group(self) -> None:
# Create a new group
channel = self.make_request(
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 090d2d0a29..0a3d017dc9 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
@@ -384,6 +384,31 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
channel = self.make_request(b"POST", "/logout/all", access_token=access_token)
self.assertEqual(channel.result["code"], b"200", channel.result)
+ def test_login_with_overly_long_device_id_fails(self) -> None:
+ self.register_user("mickey", "cheese")
+
+ # create a device_id longer than 512 characters
+ device_id = "yolo" * 512
+
+ body = {
+ "type": "m.login.password",
+ "user": "mickey",
+ "password": "cheese",
+ "device_id": device_id,
+ }
+
+ # make a login request with the bad device_id
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/login",
+ json.dumps(body).encode("utf8"),
+ custom_headers=None,
+ )
+
+ # test that the login fails with the correct error code
+ self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
+
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 6fabada8b3..65743cdf67 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -355,7 +355,6 @@ class RelationsTestCase(BaseRelationsTestCase):
self.assertEqual(200, channel.code, channel.json_body)
self.assertNotIn("m.relations", channel.json_body["unsigned"])
- @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
def test_edit(self) -> None:
"""Test that a simple edit works."""
@@ -380,13 +379,16 @@ class RelationsTestCase(BaseRelationsTestCase):
{"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
)
+ # /event should return the *original* event
channel = self.make_request(
"GET",
f"/rooms/{self.room}/event/{self.parent_id}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(channel.json_body["content"], new_body)
+ self.assertEqual(
+ channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"}
+ )
assert_bundle(channel.json_body)
# Request the room messages.
@@ -399,6 +401,7 @@ class RelationsTestCase(BaseRelationsTestCase):
assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"]))
# Request the room context.
+ # /context should return the edited event.
channel = self.make_request(
"GET",
f"/rooms/{self.room}/context/{self.parent_id}",
@@ -406,6 +409,7 @@ class RelationsTestCase(BaseRelationsTestCase):
)
self.assertEqual(200, channel.code, channel.json_body)
assert_bundle(channel.json_body["event"])
+ self.assertEqual(channel.json_body["event"]["content"], new_body)
# Request sync, but limit the timeline so it becomes limited (and includes
# bundled aggregations).
@@ -470,14 +474,14 @@ class RelationsTestCase(BaseRelationsTestCase):
channel = self.make_request(
"GET",
- f"/rooms/{self.room}/event/{self.parent_id}",
+ f"/rooms/{self.room}/context/{self.parent_id}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(channel.json_body["content"], new_body)
+ self.assertEqual(channel.json_body["event"]["content"], new_body)
- relations_dict = channel.json_body["unsigned"].get("m.relations")
+ relations_dict = channel.json_body["event"]["unsigned"].get("m.relations")
self.assertIn(RelationTypes.REPLACE, relations_dict)
m_replace_dict = relations_dict[RelationTypes.REPLACE]
@@ -492,10 +496,9 @@ class RelationsTestCase(BaseRelationsTestCase):
"""Test that editing a reply works."""
# Create a reply to edit.
+ original_body = {"msgtype": "m.text", "body": "A reply!"}
channel = self._send_relation(
- RelationTypes.REFERENCE,
- "m.room.message",
- content={"msgtype": "m.text", "body": "A reply!"},
+ RelationTypes.REFERENCE, "m.room.message", content=original_body
)
reply = channel.json_body["event_id"]
@@ -508,38 +511,54 @@ class RelationsTestCase(BaseRelationsTestCase):
)
edit_event_id = channel.json_body["event_id"]
+ # /event returns the original event
channel = self.make_request(
"GET",
f"/rooms/{self.room}/event/{reply}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
+ event_result = channel.json_body
+ self.assertDictContainsSubset(original_body, event_result["content"])
- # We expect to see the new body in the dict, as well as the reference
- # metadata sill intact.
- self.assertDictContainsSubset(new_body, channel.json_body["content"])
- self.assertDictContainsSubset(
- {
- "m.relates_to": {
- "event_id": self.parent_id,
- "rel_type": "m.reference",
- }
- },
- channel.json_body["content"],
+ # also check /context, which returns the *edited* event
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room}/context/{reply}",
+ access_token=self.user_token,
)
+ self.assertEqual(200, channel.code, channel.json_body)
+ context_result = channel.json_body["event"]
- # We expect that the edit relation appears in the unsigned relations
- # section.
- relations_dict = channel.json_body["unsigned"].get("m.relations")
- self.assertIn(RelationTypes.REPLACE, relations_dict)
+ # check that the relations are correct for both APIs
+ for result_event_dict, desc in (
+ (event_result, "/event"),
+ (context_result, "/context"),
+ ):
+ # The reference metadata should still be intact.
+ self.assertDictContainsSubset(
+ {
+ "m.relates_to": {
+ "event_id": self.parent_id,
+ "rel_type": "m.reference",
+ }
+ },
+ result_event_dict["content"],
+ desc,
+ )
- m_replace_dict = relations_dict[RelationTypes.REPLACE]
- for key in ["event_id", "sender", "origin_server_ts"]:
- self.assertIn(key, m_replace_dict)
+ # We expect that the edit relation appears in the unsigned relations
+ # section.
+ relations_dict = result_event_dict["unsigned"].get("m.relations")
+ self.assertIn(RelationTypes.REPLACE, relations_dict, desc)
- self.assert_dict(
- {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
- )
+ m_replace_dict = relations_dict[RelationTypes.REPLACE]
+ for key in ["event_id", "sender", "origin_server_ts"]:
+ self.assertIn(key, m_replace_dict, desc)
+
+ self.assert_dict(
+ {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+ )
def test_edit_thread(self) -> None:
"""Test that editing a thread works."""
@@ -605,19 +624,31 @@ class RelationsTestCase(BaseRelationsTestCase):
)
# Request the original event.
+ # /event should return the original event.
channel = self.make_request(
"GET",
f"/rooms/{self.room}/event/{self.parent_id}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- # The edit to the edit should be ignored.
- self.assertEqual(channel.json_body["content"], new_body)
+ self.assertEqual(
+ channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"}
+ )
# The relations information should not include the edit to the edit.
relations_dict = channel.json_body["unsigned"].get("m.relations")
self.assertIn(RelationTypes.REPLACE, relations_dict)
+ # /context should return the event updated for the *first* edit
+ # (The edit to the edit should be ignored.)
+ channel = self.make_request(
+ "GET",
+ f"/rooms/{self.room}/context/{self.parent_id}",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+ self.assertEqual(channel.json_body["event"]["content"], new_body)
+
m_replace_dict = relations_dict[RelationTypes.REPLACE]
for key in ["event_id", "sender", "origin_server_ts"]:
self.assertIn(key, m_replace_dict)
@@ -966,7 +997,6 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
]
assert_bundle(self._find_event_in_chunk(chunk))
- @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
def test_annotation(self) -> None:
"""
Test that annotations get correctly bundled.
@@ -991,7 +1021,6 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
- @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
def test_reference(self) -> None:
"""
Test that references get correctly bundled.
@@ -1010,7 +1039,6 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
- @unittest.override_config({"experimental_features": {"msc3666_enabled": True}})
def test_thread(self) -> None:
"""
Test that threads get correctly bundled.
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
index 44f333a0ee..41a1bf6d89 100644
--- a/tests/rest/client/test_room_batch.py
+++ b/tests/rest/client/test_room_batch.py
@@ -7,9 +7,9 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventContentFields, EventTypes
from synapse.appservice import ApplicationService
from synapse.rest import admin
-from synapse.rest.client import login, register, room, room_batch
+from synapse.rest.client import login, register, room, room_batch, sync
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, RoomStreamToken
from synapse.util import Clock
from tests import unittest
@@ -63,6 +63,7 @@ class RoomBatchTestCase(unittest.HomeserverTestCase):
room.register_servlets,
register.register_servlets,
login.register_servlets,
+ sync.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
@@ -178,3 +179,123 @@ class RoomBatchTestCase(unittest.HomeserverTestCase):
"Expected a single state_group to be returned by saw state_groups=%s"
% (state_group_map.keys(),),
)
+
+ @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
+ def test_sync_while_batch_importing(self) -> None:
+ """
+ Make sure that /sync correctly returns full room state when a user joins
+ during ongoing batch backfilling.
+ See: https://github.com/matrix-org/synapse/issues/12281
+ """
+ # Create a user who will be invited to & join the room
+ user_id = self.register_user("beep", "test")
+ user_tok = self.login("beep", "test")
+
+ time_before_room = int(self.clock.time_msec())
+
+ # Create a room with some events
+ room_id, _, _, _ = self._create_test_room()
+ # Invite the user
+ self.helper.invite(
+ room_id, src=self.appservice.sender, tok=self.appservice.token, targ=user_id
+ )
+
+ # Create another room, send a bunch of events to advance the stream token
+ other_room_id = self.helper.create_room_as(
+ self.appservice.sender, tok=self.appservice.token
+ )
+ for _ in range(5):
+ self.helper.send_event(
+ room_id=other_room_id,
+ type=EventTypes.Message,
+ content={"msgtype": "m.text", "body": "C"},
+ tok=self.appservice.token,
+ )
+
+ # Join the room as the normal user
+ self.helper.join(room_id, user_id, tok=user_tok)
+
+ # Create an event to hang the historical batch from - in order to see
+ # the failure case originally reported in #12281, the historical batch
+ # must be hung from the most recent event in the room, so that the base
+ # insertion event ends up with the highest `topological_ordering`
+ # (`depth`) in the room while having a negative `stream_ordering`
+ # because it's a `historical` event. Previously, when assembling the
+ # `state` for the `/sync` response, the buggy logic sorted by
+ # `topological_ordering` descending and picked up the base insertion
+ # event because its negative `stream_ordering` is below the given
+ # pagination token. Now we properly sort by `stream_ordering`
+ # descending, which puts `historical` events with a negative
+ # `stream_ordering` at the bottom, so they are no longer (incorrectly)
+ # selected.
+ response = self.helper.send_event(
+ room_id=room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "C",
+ },
+ tok=self.appservice.token,
+ )
+ event_to_hang_id = response["event_id"]
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
+ % (room_id, event_to_hang_id),
+ content={
+ "events": _create_message_events_for_batch_send_request(
+ self.virtual_user_id, time_before_room, 3
+ ),
+ "state_events_at_start": _create_join_state_events_for_batch_send_request(
+ [self.virtual_user_id], time_before_room
+ ),
+ },
+ access_token=self.appservice.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Now we need to find the stream token for the invite event so we can sync from just after it
+ main_store = self.hs.get_datastores().main
+ events, next_key = self.get_success(
+ main_store.get_recent_events_for_room(
+ room_id,
+ 50,
+ end_token=main_store.get_room_max_token(),
+ ),
+ )
+ invite_event_position = None
+ for event in events:
+ if (
+ event.type == "m.room.member"
+ and event.content["membership"] == "invite"
+ ):
+ invite_event_position = self.get_success(
+ main_store.get_topological_token_for_event(event.event_id)
+ )
+ break
+
+ assert invite_event_position is not None, "No invite event found"
+
+ # Remove the topological part of the token by re-creating it with only the stream position
+ invite_event_position = RoomStreamToken(None, invite_event_position.stream)
+
+ # Sync everything after this token
+ since_token = self.get_success(invite_event_position.to_string(main_store))
+ sync_response = self.make_request(
+ "GET",
+ f"/sync?since={since_token}",
+ access_token=user_tok,
+ )
+
+ # Assert that, for this room, the user was considered to have joined and thus
+ # receives the full state history
+ state_event_types = [
+ event["type"]
+ for event in sync_response.json_body["rooms"]["join"][room_id]["state"][
+ "events"
+ ]
+ ]
+
+ assert (
+ "m.room.create" in state_event_types
+ ), "Missing room full state in sync response"
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index d1227dd4ac..ccc3893869 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -21,6 +21,29 @@ class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastores().main
+ def add_device_change(self, user_id, device_ids, host):
+ """Add a device list change for the given device to
+ `device_lists_outbound_pokes` table.
+ """
+
+ for device_id in device_ids:
+ stream_id = self.get_success(
+ self.store.add_device_change_to_streams(
+ "user_id", [device_id], ["!some:room"]
+ )
+ )
+
+ self.get_success(
+ self.store.add_device_list_outbound_pokes(
+ user_id=user_id,
+ device_id=device_id,
+ room_id="!some:room",
+ stream_id=stream_id,
+ hosts=[host],
+ context={},
+ )
+ )
+
def test_store_new_device(self):
self.get_success(
self.store.store_device("user_id", "device_id", "display_name")
@@ -95,11 +118,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
device_ids = ["device_id1", "device_id2"]
# Add two device updates with sequential `stream_id`s
- self.get_success(
- self.store.add_device_change_to_streams(
- "user_id", device_ids, ["somehost"], ["!some:room"]
- )
- )
+ self.add_device_change("@user_id:test", device_ids, "somehost")
# Get all device updates ever meant for this remote
now_stream_id, device_updates = self.get_success(
@@ -123,11 +142,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
"device_id4",
"device_id5",
]
- self.get_success(
- self.store.add_device_change_to_streams(
- "user_id", device_ids, ["somehost"], ["!some:room"]
- )
- )
+ self.add_device_change("@user_id:test", device_ids, "somehost")
# Get device updates meant for this remote
next_stream_id, device_updates = self.get_success(
@@ -147,11 +162,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
# Add some more device updates to ensure it still resumes properly
device_ids = ["device_id6", "device_id7"]
- self.get_success(
- self.store.add_device_change_to_streams(
- "user_id", device_ids, ["somehost"], ["!some:room"]
- )
- )
+ self.add_device_change("@user_id:test", device_ids, "somehost")
# Get the next batch of device updates
next_stream_id, device_updates = self.get_success(
@@ -224,11 +235,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
"fakeSelfSigning",
]
- self.get_success(
- self.store.add_device_change_to_streams(
- "@user_id:test", device_ids, ["somehost"], ["!some:room"]
- )
- )
+ self.add_device_change("@user_id:test", device_ids, "somehost")
# Get device updates meant for this remote
next_stream_id, device_updates = self.get_success(
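
A hedged, self-contained sketch of the call-shape change these device-store test edits track: previously a single `add_device_change_to_streams` call took the destination hosts directly; per this diff, the stream change and the per-device outbound poke are now written separately. `FakeStore` is a stub for illustration only; the real signatures are inferred from the test code above rather than from the store API itself.

```python
# A stub-backed sketch of the new two-step device-change shape.
# FakeStore is hypothetical; signatures are inferred from the tests above.
import asyncio


class FakeStore:
    def __init__(self):
        self.calls = []

    async def add_device_change_to_streams(self, user_id, device_ids, room_ids):
        self.calls.append(("stream", user_id, device_ids, room_ids))
        return len(self.calls)  # stand-in for the allocated stream_id

    async def add_device_list_outbound_pokes(self, **kwargs):
        self.calls.append(("poke", kwargs))


async def add_device_change(store, user_id, device_ids, host):
    # New shape: one stream entry and one outbound poke per device.
    for device_id in device_ids:
        stream_id = await store.add_device_change_to_streams(
            user_id, [device_id], ["!some:room"]
        )
        await store.add_device_list_outbound_pokes(
            user_id=user_id,
            device_id=device_id,
            room_id="!some:room",
            stream_id=stream_id,
            hosts=[host],
            context={},
        )


store = FakeStore()
asyncio.run(
    add_device_change(store, "@user_id:test", ["device_id1", "device_id2"], "somehost")
)
assert len(store.calls) == 4  # two devices x (stream entry + poke)
```
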
diff --git a/tox.ini b/tox.ini
index b4ce400edf..4cd9dfb966 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py37, py38, py39, py310, check_codestyle, check_isort
+envlist = py37, py38, py39, py310
# we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208
minversion = 2.3.2
@@ -32,20 +32,6 @@ deps =
# install the "enum34" dependency of cryptography.
pip>=10
-# directories/files we run the linters on.
-# TODO: this is now out of date; we will remove as part of poetry migration.
-lint_targets =
- setup.py
- synapse
- tests
- # annoyingly, black doesn't find these so we have to list them
- scripts-dev
- stubs
- contrib
- synmark
- .ci
- docker
-
# default settings for all tox environments
[testenv]
deps =
@@ -116,18 +102,3 @@ setenv =
commands =
python -m synmark {posargs:}
-[testenv:check_codestyle]
-extras = lint
-commands =
- python -m black --check --diff {[base]lint_targets}
- flake8 {[base]lint_targets} {env:PEP8SUFFIX:}
-
-[testenv:check_isort]
-extras = lint
-commands = isort -c --df {[base]lint_targets}
-
-[testenv:mypy]
-deps =
- {[base]deps}
-extras = all,mypy
-commands = mypy