diff options
209 files changed, 3426 insertions, 7549 deletions
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md deleted file mode 100644 index 978b699886..0000000000 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -<!-- - -**THIS IS NOT A SUPPORT CHANNEL!** -**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, -please ask in **#synapse:matrix.org** (using a matrix.org account if necessary) - -If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/ - -This is a bug report template. By following the instructions below and -filling out the sections with your information, you will help the us to get all -the necessary data to fix your issue. - -You can also preview your report before submitting it. You may remove sections -that aren't relevant to your particular case. - -Text between <!-- and --​> marks will be invisible in the report. - ---> - -### Description - -<!-- Describe here the problem that you are experiencing --> - -### Steps to reproduce - -- list the steps -- that reproduce the bug -- using hyphens as bullet points - -<!-- -Describe how what happens differs from what you expected. - -If you can identify any relevant log snippets from _homeserver.log_, please include -those (please be careful to remove any personal or private data). Please surround them with -``` (three backticks, on a line on their own), so that they are formatted legibly. ---> - -### Version information - -<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem --> - -<!-- Was this issue identified on matrix.org or another homeserver? --> -- **Homeserver**: - -If not matrix.org: - -<!-- - What version of Synapse is running? 
- -You can find the Synapse version with this command: - -$ curl http://localhost:8008/_synapse/admin/v1/server_version - -(You may need to replace `localhost:8008` if Synapse is not configured to -listen on that port.) ---> -- **Version**: - -- **Install method**: -<!-- examples: package manager/git clone/pip --> - -- **Platform**: -<!-- -Tell us about the environment in which your homeserver is operating -distro, hardware, if it's running in a vm/container, etc. ---> diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 0000000000..1b304198bc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,103 @@ +name: Bug report +description: Create a report to help us improve +body: + - type: markdown + attributes: + value: | + **THIS IS NOT A SUPPORT CHANNEL!** + **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary). + + If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/ + + This is a bug report form. By following the instructions below and completing the sections with your information, you will help the us to get all the necessary data to fix your issue. + + You can also preview your report before submitting it. + - type: textarea + id: description + attributes: + label: Description + description: Describe the problem that you are experiencing + validations: + required: true + - type: textarea + id: reproduction_steps + attributes: + label: Steps to reproduce + description: | + Describe the series of steps that leads you to the problem. + + Describe how what happens differs from what you expected. + placeholder: Tell us what you see! 
+ value: | + - list the steps + - that reproduce the bug + - using hyphens as bullet points + validations: + required: true + - type: markdown + attributes: + value: | + --- + + **IMPORTANT**: please answer the following questions, to help us narrow down the problem. + - type: input + id: homeserver + attributes: + label: Homeserver + description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc) + validations: + required: true + - type: input + id: version + attributes: + label: Synapse Version + description: | + What version of Synapse is this homeserver running? + + You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version + + or with this command: + + ``` + $ curl http://localhost:8008/_synapse/admin/v1/server_version + ``` + + (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.) + validations: + required: true + - type: dropdown + id: install_method + attributes: + label: Installation Method + options: + - Docker (matrixdotorg/synapse) + - Debian packages from packages.matrix.org + - pip (from PyPI) + - Other (please mention below) + - type: textarea + id: platform + attributes: + label: Platform + description: | + Tell us about the environment in which your homeserver is operating... + e.g. distro, hardware, if it's running in a vm/container, etc. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: | + Please copy and paste any relevant log output, ideally at INFO or DEBUG log level. + This will be automatically formatted into code, so there is no need for backticks. + + Please be careful to remove any personal or private data. + + **Bug reports are usually very difficult to diagnose without logging.** + render: shell + validations: + required: true + - type: textarea + id: anything_else + attributes: + label: Anything else that would be useful to know? 
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 83ab727378..4ce27ff413 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -310,6 +310,16 @@ jobs: needs: linting-done runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - arrangement: monolith + database: SQLite + + - arrangement: monolith + database: Postgres + steps: # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path @@ -337,7 +347,7 @@ jobs: - run: | set -o pipefail - COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt shell: bash name: Run Complement Tests diff --git a/changelog.d/12674.misc b/changelog.d/12674.misc new file mode 100644 index 0000000000..c8a8f32f0a --- /dev/null +++ b/changelog.d/12674.misc @@ -0,0 +1 @@ +Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. diff --git a/changelog.d/12737.doc b/changelog.d/12737.doc new file mode 100644 index 0000000000..ab2d1f2fd9 --- /dev/null +++ b/changelog.d/12737.doc @@ -0,0 +1 @@ +Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. \ No newline at end of file diff --git a/changelog.d/12738.misc b/changelog.d/12738.misc new file mode 100644 index 0000000000..8252223475 --- /dev/null +++ b/changelog.d/12738.misc @@ -0,0 +1 @@ +Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. 
\ No newline at end of file diff --git a/changelog.d/12857.feature b/changelog.d/12857.feature new file mode 100644 index 0000000000..ddd1dbe685 --- /dev/null +++ b/changelog.d/12857.feature @@ -0,0 +1 @@ +Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/changelog.d/12881.misc b/changelog.d/12881.misc new file mode 100644 index 0000000000..8a83182bd4 --- /dev/null +++ b/changelog.d/12881.misc @@ -0,0 +1 @@ +Merge the Complement testing Docker images into a single, multi-purpose image. \ No newline at end of file diff --git a/changelog.d/12929.misc b/changelog.d/12929.misc new file mode 100644 index 0000000000..20718d258d --- /dev/null +++ b/changelog.d/12929.misc @@ -0,0 +1 @@ +Clean up the test code for client disconnection. diff --git a/changelog.d/12941.misc b/changelog.d/12941.misc new file mode 100644 index 0000000000..6a74f255df --- /dev/null +++ b/changelog.d/12941.misc @@ -0,0 +1 @@ +Remove code generating comments in configuration. diff --git a/changelog.d/12954.misc b/changelog.d/12954.misc new file mode 100644 index 0000000000..20bf136732 --- /dev/null +++ b/changelog.d/12954.misc @@ -0,0 +1 @@ +Replace noop background updates with `DELETE` delta. diff --git a/changelog.d/12957.misc b/changelog.d/12957.misc new file mode 100644 index 0000000000..0c075276ec --- /dev/null +++ b/changelog.d/12957.misc @@ -0,0 +1 @@ +Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. diff --git a/changelog.d/12963.misc b/changelog.d/12963.misc new file mode 100644 index 0000000000..d57e1aca6b --- /dev/null +++ b/changelog.d/12963.misc @@ -0,0 +1 @@ +Reduce the amount of state we pull from the DB. 
diff --git a/changelog.d/12965.misc b/changelog.d/12965.misc new file mode 100644 index 0000000000..cc2823e12b --- /dev/null +++ b/changelog.d/12965.misc @@ -0,0 +1 @@ +Enable testing against PostgreSQL databases in Complement CI. \ No newline at end of file diff --git a/changelog.d/12969.misc b/changelog.d/12969.misc new file mode 100644 index 0000000000..05de7ce839 --- /dev/null +++ b/changelog.d/12969.misc @@ -0,0 +1 @@ +Fix an inaccurate comment. diff --git a/changelog.d/12970.misc b/changelog.d/12970.misc new file mode 100644 index 0000000000..8f874aa07b --- /dev/null +++ b/changelog.d/12970.misc @@ -0,0 +1 @@ +Remove the `delete_device` method and always call `delete_devices`. diff --git a/changelog.d/12973.bugfix b/changelog.d/12973.bugfix new file mode 100644 index 0000000000..1bf45854ff --- /dev/null +++ b/changelog.d/12973.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. diff --git a/changelog.d/12979.bugfix b/changelog.d/12979.bugfix new file mode 100644 index 0000000000..6b54408025 --- /dev/null +++ b/changelog.d/12979.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. diff --git a/changelog.d/12982.misc b/changelog.d/12982.misc new file mode 100644 index 0000000000..036b69efe6 --- /dev/null +++ b/changelog.d/12982.misc @@ -0,0 +1 @@ +Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. \ No newline at end of file diff --git a/changelog.d/12984.misc b/changelog.d/12984.misc new file mode 100644 index 0000000000..a902017180 --- /dev/null +++ b/changelog.d/12984.misc @@ -0,0 +1 @@ +Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. 
diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc new file mode 100644 index 0000000000..d5ab9eedea --- /dev/null +++ b/changelog.d/12985.misc @@ -0,0 +1 @@ +Add type annotations to `tests.state.test_v2`. diff --git a/changelog.d/12986.misc b/changelog.d/12986.misc new file mode 100644 index 0000000000..937b888023 --- /dev/null +++ b/changelog.d/12986.misc @@ -0,0 +1 @@ +Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. diff --git a/changelog.d/12990.misc b/changelog.d/12990.misc new file mode 100644 index 0000000000..c68f6a731e --- /dev/null +++ b/changelog.d/12990.misc @@ -0,0 +1 @@ +Fix documentation for running complement tests. diff --git a/changelog.d/12991.bugfix b/changelog.d/12991.bugfix new file mode 100644 index 0000000000..c6e388d5b9 --- /dev/null +++ b/changelog.d/12991.bugfix @@ -0,0 +1,2 @@ +Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced +in Synapse v1.41.0. diff --git a/changelog.d/13004.misc b/changelog.d/13004.misc new file mode 100644 index 0000000000..d8e93d87af --- /dev/null +++ b/changelog.d/13004.misc @@ -0,0 +1 @@ +Faster joins: add issue links to the TODO comments in the code. diff --git a/changelog.d/13013.misc b/changelog.d/13013.misc new file mode 100644 index 0000000000..903c6a3c8a --- /dev/null +++ b/changelog.d/13013.misc @@ -0,0 +1 @@ +Modernize the `contrib/graph/` scripts. diff --git a/changelog.d/13017.misc b/changelog.d/13017.misc new file mode 100644 index 0000000000..b314687f9c --- /dev/null +++ b/changelog.d/13017.misc @@ -0,0 +1 @@ +Remove redundant `room_version` parameters from event auth functions. diff --git a/changelog.d/13021.misc b/changelog.d/13021.misc new file mode 100644 index 0000000000..84c41cdf59 --- /dev/null +++ b/changelog.d/13021.misc @@ -0,0 +1 @@ +Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. 
diff --git a/changelog.d/13022.doc b/changelog.d/13022.doc new file mode 100644 index 0000000000..4d6ac7ae94 --- /dev/null +++ b/changelog.d/13022.doc @@ -0,0 +1 @@ +Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. diff --git a/changelog.d/13023.doc b/changelog.d/13023.doc new file mode 100644 index 0000000000..5589c7492c --- /dev/null +++ b/changelog.d/13023.doc @@ -0,0 +1 @@ +Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. \ No newline at end of file diff --git a/changelog.d/13034.misc b/changelog.d/13034.misc new file mode 100644 index 0000000000..cc2823e12b --- /dev/null +++ b/changelog.d/13034.misc @@ -0,0 +1 @@ +Enable testing against PostgreSQL databases in Complement CI. \ No newline at end of file diff --git a/changelog.d/13041.bugfix b/changelog.d/13041.bugfix new file mode 100644 index 0000000000..edb1635eb9 --- /dev/null +++ b/changelog.d/13041.bugfix @@ -0,0 +1,2 @@ +Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would ccause an internal error. Synapse now returns 400 Bad Request in this situation. + diff --git a/changelog.d/13047.feature b/changelog.d/13047.feature new file mode 100644 index 0000000000..ddd1dbe685 --- /dev/null +++ b/changelog.d/13047.feature @@ -0,0 +1 @@ +Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/changelog.d/13048.misc b/changelog.d/13048.misc new file mode 100644 index 0000000000..073c8b1a96 --- /dev/null +++ b/changelog.d/13048.misc @@ -0,0 +1 @@ +Increase timeout of complement CI test runs. 
diff --git a/changelog.d/13050.misc b/changelog.d/13050.misc new file mode 100644 index 0000000000..20bf136732 --- /dev/null +++ b/changelog.d/13050.misc @@ -0,0 +1 @@ +Replace noop background updates with `DELETE` delta. diff --git a/changelog.d/13052.misc b/changelog.d/13052.misc new file mode 100644 index 0000000000..0d11dfb12a --- /dev/null +++ b/changelog.d/13052.misc @@ -0,0 +1 @@ +Refactor entry points so that they all have a `main` function. \ No newline at end of file diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md new file mode 100644 index 0000000000..4dbfee2853 --- /dev/null +++ b/contrib/docker_compose_workers/README.md @@ -0,0 +1,125 @@ +# Setting up Synapse with Workers using Docker Compose + +This directory describes how deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/). + +Example worker configuration files can be found [here](workers). + +All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file. + +An example Docker Compose file can be found [here](docker-compose.yaml). + +## Worker Service Examples in Docker Compose + +In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`). 
+ +### Generic Worker Example + +```yaml +synapse-generic-worker-1: + image: matrixdotorg/synapse:latest + container_name: synapse-generic-worker-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"] + healthcheck: + test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"] + start_period: "5s" + interval: "15s" + timeout: "5s" + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.generic_worker + # Expose port if required so your reverse proxy can send requests to this worker + # Port configuration will depend on how the http listener is defined in the worker configuration file + ports: + - 8081:8081 + depends_on: + - synapse +``` + +### Federation Sender Example + +Please note: The federation sender does not receive REST API calls so no exposed ports are required. + +```yaml +synapse-federation-sender-1: + image: matrixdotorg/synapse:latest + container_name: synapse-federation-sender-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"] + healthcheck: + disable: true + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.federation_sender + depends_on: + - synapse +``` + +## `homeserver.yaml` Configuration + +### Enable Redis + +Locate the `redis` section of your `homeserver.yaml` and enable and configure it: + +```yaml +redis: + enabled: true + host: redis + port: 6379 + # password: <secret_password> +``` + +This assumes that your Redis service is called `redis` in your Docker Compose file. 
+ +### Add a replication Listener + +Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener: + +```yaml +listeners: + # Other listeners + + - port: 9093 + type: http + resources: + - names: [replication] +``` + +This listener is used by the workers for replication and is referred to in worker config files using the following settings: + +```yaml +worker_replication_host: synapse +worker_replication_http_port: 9093 +``` + +### Add Workers to `instance_map` + +Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers: + +```yaml +instance_map: + synapse-generic-worker-1: # The worker_name setting in your worker configuration file + host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file + port: 8034 # The port assigned to the replication listener in your worker config file + synapse-federation-sender-1: + host: synapse-federation-sender-1 + port: 8034 +``` + +### Configure Federation Senders + +This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them: + +```yaml +# This will disable federation sending on the main Synapse instance +send_federation: false + +federation_sender_instances: + - synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file +``` + +## Other Worker types + +Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers. 
\ No newline at end of file diff --git a/contrib/docker_compose_workers/docker-compose.yaml b/contrib/docker_compose_workers/docker-compose.yaml new file mode 100644 index 0000000000..eaf02c2af9 --- /dev/null +++ b/contrib/docker_compose_workers/docker-compose.yaml @@ -0,0 +1,77 @@ +networks: + backend: + +services: + postgres: + image: postgres:latest + restart: unless-stopped + volumes: + - ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw + networks: + - backend + environment: + POSTGRES_DB: synapse + POSTGRES_USER: synapse_user + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C + + redis: + image: redis:latest + restart: unless-stopped + networks: + - backend + + synapse: + image: matrixdotorg/synapse:latest + container_name: synapse + restart: unless-stopped + volumes: + - ${VOLUME_PATH}/data:/data:rw + ports: + - 8008:8008 + networks: + - backend + environment: + SYNAPSE_CONFIG_DIR: /data + SYNAPSE_CONFIG_PATH: /data/homeserver.yaml + depends_on: + - postgres + + synapse-generic-worker-1: + image: matrixdotorg/synapse:latest + container_name: synapse-generic-worker-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"] + healthcheck: + test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"] + start_period: "5s" + interval: "15s" + timeout: "5s" + networks: + - backend + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.generic_worker + # Expose port if required so your reverse proxy can send requests to this worker + # Port configuration will depend on how the http listener is defined in the worker configuration file + ports: + - 8081:8081 + depends_on: + - synapse + + synapse-federation-sender-1: + image: matrixdotorg/synapse:latest + container_name: synapse-federation-sender-1 + restart: 
unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"] + healthcheck: + disable: true + networks: + - backend + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.federation_sender + depends_on: + - synapse diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml new file mode 100644 index 0000000000..5ba42a92d2 --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml @@ -0,0 +1,14 @@ +worker_app: synapse.app.federation_sender +worker_name: synapse-federation-sender-1 + +# The replication listener on the main synapse process. +worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + +worker_log_config: /data/federation_sender.log.config diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml new file mode 100644 index 0000000000..694584105a --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml @@ -0,0 +1,19 @@ +worker_app: synapse.app.generic_worker +worker_name: synapse-generic-worker-1 + +# The replication listener on the main synapse process. 
+worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + - type: http + port: 8081 + x_forwarded: true + resources: + - names: [client, federation] + +worker_log_config: /data/worker.log.config diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index fdbac087bd..3c4f47dbd2 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,11 +1,3 @@ -import argparse -import cgi -import datetime -import json - -import pydot -import urllib2 - # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +12,25 @@ import urllib2 # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import cgi +import datetime +import json +import urllib.request +from typing import List + +import pydot + -def make_name(pdu_id, origin): - return "%s@%s" % (pdu_id, origin) +def make_name(pdu_id: str, origin: str) -> str: + return f"{pdu_id}@{origin}" -def make_graph(pdus, room, filename_prefix): +def make_graph(pdus: List[dict], filename_prefix: str) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by querying a homeserver. 
+ """ pdu_map = {} node_map = {} @@ -111,10 +116,10 @@ def make_graph(pdus, room, filename_prefix): graph.write_svg("%s.svg" % filename_prefix, prog="dot") -def get_pdus(host, room): +def get_pdus(host: str, room: str) -> List[dict]: transaction = json.loads( - urllib2.urlopen( - "http://%s/_matrix/federation/v1/context/%s/" % (host, room) + urllib.request.urlopen( + f"http://{host}/_matrix/federation/v1/context/{room}/" ).read() ) @@ -141,4 +146,4 @@ if __name__ == "__main__": pdus = get_pdus(host, room) - make_graph(pdus, room, prefix) + make_graph(pdus, prefix) diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py index 0980231e4a..b46094ce0a 100644 --- a/contrib/graph/graph2.py +++ b/contrib/graph/graph2.py @@ -14,22 +14,31 @@ import argparse -import cgi import datetime +import html import json import sqlite3 import pydot -from synapse.events import FrozenEvent +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict from synapse.util.frozenutils import unfreeze -def make_graph(db_name, room_id, file_prefix, limit): +def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by reading from a Synapse SQLite database. + """ conn = sqlite3.connect(db_name) + sql = "SELECT room_version FROM rooms WHERE room_id = ?" + c = conn.execute(sql, (room_id,)) + room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]] + sql = ( - "SELECT json FROM event_json as j " + "SELECT json, internal_metadata FROM event_json as j " "INNER JOIN events as e ON e.event_id = j.event_id " "WHERE j.room_id = ?" 
) @@ -43,7 +52,10 @@ def make_graph(db_name, room_id, file_prefix, limit): c = conn.execute(sql, args) - events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()] + events = [ + make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1])) + for e in c.fetchall() + ] events.sort(key=lambda e: e.depth) @@ -84,7 +96,7 @@ def make_graph(db_name, room_id, file_prefix, limit): "name": event.event_id, "type": event.type, "state_key": event.get("state_key", None), - "content": cgi.escape(content, quote=True), + "content": html.escape(content, quote=True), "time": t, "depth": event.depth, "state_group": state_group, @@ -96,11 +108,11 @@ def make_graph(db_name, room_id, file_prefix, limit): graph.add_node(node) for event in events: - for prev_id, _ in event.prev_events: + for prev_id in event.prev_event_ids(): try: end_node = node_map[prev_id] except Exception: - end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,)) + end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>") node_map[prev_id] = end_node graph.add_node(end_node) @@ -112,7 +124,7 @@ def make_graph(db_name, room_id, file_prefix, limit): if len(event_ids) <= 1: continue - cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),)) + cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>") for event_id in event_ids: cluster.add_node(node_map[event_id]) @@ -126,7 +138,7 @@ def make_graph(db_name, room_id, file_prefix, limit): if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate a PDU graph for a given room by talking " - "to the given homeserver to get the list of PDUs. \n" + "to the given Synapse SQLite file to get the list of PDUs. \n" "Requires pydot." 
) parser.add_argument( diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py index dd0c19368b..a28a1594c7 100644 --- a/contrib/graph/graph3.py +++ b/contrib/graph/graph3.py @@ -1,13 +1,3 @@ -import argparse -import cgi -import datetime - -import pydot -import simplejson as json - -from synapse.events import FrozenEvent -from synapse.util.frozenutils import unfreeze - # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,15 +12,35 @@ from synapse.util.frozenutils import unfreeze # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import datetime +import html +import json + +import pydot -def make_graph(file_name, room_id, file_prefix, limit): +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict +from synapse.util.frozenutils import unfreeze + + +def make_graph(file_name: str, file_prefix: str, limit: int) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by reading line-delimited JSON from a file. + """ print("Reading lines") with open(file_name) as f: lines = f.readlines() print("Read lines") - events = [FrozenEvent(json.loads(line)) for line in lines] + # Figure out the room version, assume the first line is the create event. 
+ room_version = KNOWN_ROOM_VERSIONS[ + json.loads(lines[0]).get("content", {}).get("room_version") + ] + + events = [make_event_from_dict(json.loads(line), room_version) for line in lines] print("Loaded events.") @@ -66,8 +76,8 @@ def make_graph(file_name, room_id, file_prefix, limit): content.append( "<b>%s</b>: %s," % ( - cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), - cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), ) ) @@ -101,11 +111,11 @@ def make_graph(file_name, room_id, file_prefix, limit): print("Created Nodes") for event in events: - for prev_id, _ in event.prev_events: + for prev_id in event.prev_event_ids(): try: end_node = node_map[prev_id] except Exception: - end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,)) + end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>") node_map[prev_id] = end_node graph.add_node(end_node) @@ -139,8 +149,7 @@ if __name__ == "__main__": ) parser.add_argument("-l", "--limit", help="Only retrieve the last N events.") parser.add_argument("event_file") - parser.add_argument("room") args = parser.parse_args() - make_graph(args.event_file, args.room, args.prefix, args.limit) + make_graph(args.event_file, args.prefix, args.limit) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 24b03585f9..83db0a95b9 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,5 +1,6 @@ # Inherit from the official Synapse docker image -FROM matrixdotorg/synapse +ARG SYNAPSE_VERSION=latest +FROM matrixdotorg/synapse:$SYNAPSE_VERSION # Install deps RUN \ diff --git a/docker/README-testing.md b/docker/README-testing.md index c38cae7530..1f0423f09b 100644 --- a/docker/README-testing.md +++ b/docker/README-testing.md @@ -8,79 +8,50 @@ docker images that can be run inside Complement for testing purposes. 
Note that running Synapse's unit tests from within the docker image is not supported. -## Testing with SQLite and single-process Synapse +## Using the Complement launch script -> Note that `scripts-dev/complement.sh` is a script that will automatically build -> and run an SQLite-based, single-process of Synapse against Complement. +`scripts-dev/complement.sh` is a script that will automatically build +and run Synapse against Complement. +Consult the [contributing guide][guideComplementSh] for instructions on how to use it. -The instructions below will set up Complement testing for a single-process, -SQLite-based Synapse deployment. -Start by building the base Synapse docker image. If you wish to run tests with the latest -release of Synapse, instead of your current checkout, you can skip this step. From the -root of the repository: - -```sh -docker build -t matrixdotorg/synapse -f docker/Dockerfile . -``` - -This will build an image with the tag `matrixdotorg/synapse`. - -Next, build the Synapse image for Complement. +[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement -```sh -docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement -``` +## Building and running the images manually -This will build an image with the tag `complement-synapse`, which can be handed to -Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to -[Complement's documentation](https://github.com/matrix-org/complement/#running) for -how to run the tests, as well as the various available command line flags. - -## Testing with PostgreSQL and single or multi-process Synapse +Under some circumstances, you may wish to build the images manually. +The instructions below will lead you to doing that. -The above docker image only supports running Synapse with SQLite and in a -single-process topology. 
The following instructions are used to build a Synapse image for -Complement that supports either single or multi-process topology with a PostgreSQL -database backend. - -As with the single-process image, build the base Synapse docker image. If you wish to run -tests with the latest release of Synapse, instead of your current checkout, you can skip -this step. From the root of the repository: +Start by building the base Synapse docker image. If you wish to run tests with the latest +release of Synapse, instead of your current checkout, you can skip this step. From the +root of the repository: ```sh docker build -t matrixdotorg/synapse -f docker/Dockerfile . ``` -This will build an image with the tag `matrixdotorg/synapse`. - -Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`. -Again, from the root of the repository: +Next, build the workerised Synapse docker image, which is a layer over the base +image. ```sh docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers . ``` -This will build an image with the tag` matrixdotorg/synapse-workers`. - -It's worth noting at this point that this image is fully functional, and -can be used for testing against locally. See instructions for using the container -under -[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone) -below. - -Finally, build the Synapse image for Complement, which is based on -`matrixdotorg/synapse-workers`. +Finally, build the multi-purpose image for Complement, which is a layer over the workers image. 
```sh -docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement +docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement ``` -This will build an image with the tag `complement-synapse-workers`, which can be handed to +This will build an image with the tag `complement-synapse`, which can be handed to Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to [Complement's documentation](https://github.com/matrix-org/complement/#running) for how to run the tests, as well as the various available command line flags. +See [the Complement image README](./complement/README.md) for information about the +expected environment variables. + + ## Running the Dockerfile-worker image standalone For manual testing of a multi-process Synapse instance in Docker, @@ -113,6 +84,9 @@ docker run -d --name synapse \ ...substituting `POSTGRES*` variables for those that match a postgres host you have available (usually a running postgres docker container). + +### Workers + The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to use when running the container. All possible worker names are defined by the keys of the `WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the @@ -125,8 +99,11 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES` (e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`). Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers -(leaving only the main process). The container is configured to use redis-based worker -mode. +(leaving only the main process). +The container will only be configured to use Redis-based worker mode if there are +workers enabled. + +### Logging Logs for workers and the main process are logged to stdout and can be viewed with standard `docker logs` tooling. 
Worker logs contain their worker name @@ -136,3 +113,21 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ `<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00: 00, according to the container's clock. Logging for the main process must still be configured by modifying the homeserver's log config in your Synapse data volume. + + +### Application Services + +Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of +a directory (within the container) will cause the configuration script to scan +that directory for `.yaml`/`.yml` registration files. +Synapse will be configured to load these configuration files. + + +### TLS Termination + +Nginx is present in the image to route requests to the appropriate workers, +but it does not serve TLS by default. + +You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a +TLS certificate and key (respectively), both in PEM (textual) format. +In this case, Nginx will additionally serve using HTTPS on port 8448. diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 4823ce7364..50684c956d 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -1,22 +1,45 @@ -# A dockerfile which builds an image suitable for testing Synapse under -# complement. - +# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse +# by including a built-in postgres instance, as well as setting up the homeserver so +# that it is ready for testing via Complement. 
+# +# Instructions for building this image from those it depends on is detailed in this guide: +# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse ARG SYNAPSE_VERSION=latest +FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION + +# Install postgresql +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 + +# Configure a user and create a database for Synapse +RUN pg_ctlcluster 13 main start && su postgres -c "echo \ + \"ALTER USER postgres PASSWORD 'somesecret'; \ + CREATE DATABASE synapse \ + ENCODING 'UTF8' \ + LC_COLLATE='C' \ + LC_CTYPE='C' \ + template=template0;\" | psql" && pg_ctlcluster 13 main stop + +# Extend the shared homeserver config to disable rate-limiting, +# set Complement's static shared secret, enable registration, amongst other +# tweaks to get Synapse ready for testing. +# To do this, we copy the old template out of the way and then include it +# with Jinja2. 
+RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 +COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 -FROM matrixdotorg/synapse:${SYNAPSE_VERSION} - -ENV SERVER_NAME=localhost - -COPY conf/* /conf/ +WORKDIR /data -# generate a signing key -RUN generate_signing_key -o /conf/server.signing.key +COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf -WORKDIR /data +# Copy the entrypoint +COPY conf/start_for_complement.sh / +# Expose nginx's listener ports EXPOSE 8008 8448 -ENTRYPOINT ["/conf/start.sh"] +ENTRYPOINT ["/start_for_complement.sh"] +# Update the healthcheck to have a shorter check interval HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD curl -fSs http://localhost:8008/health || exit 1 + CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/README.md b/docker/complement/README.md index e075418e4a..37c39e2dfc 100644 --- a/docker/complement/README.md +++ b/docker/complement/README.md @@ -1 +1,32 @@ -Stuff for building the docker image used for testing under complement. +# Unified Complement image for Synapse + +This is an image for testing Synapse with [the *Complement* integration test suite][complement]. +It contains some insecure defaults that are only suitable for testing purposes, +so **please don't use this image for a production server**. + +This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory +and can be switched using environment variables between the following configurations: + +- Monolithic Synapse with SQLite (`SYNAPSE_COMPLEMENT_DATABASE=sqlite`) +- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`) +- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`) + +The image is self-contained; it contains an integrated Postgres, Redis and Nginx. 
+ + +## How to get Complement to pass the environment variables through + +To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv] +variable to configure an environment prefix to pass through, then prefix the above options +with that prefix. + +Example: +``` +COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres +``` + +Consult `scripts-dev/complement.sh` in the repository root for a real example. + + +[complement]: https://github.com/matrix-org/complement +[complementEnv]: https://github.com/matrix-org/complement/pull/382 diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile deleted file mode 100644 index 99a09cbc2b..0000000000 --- a/docker/complement/SynapseWorkers.Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse -# by including a built-in postgres instance, as well as setting up the homeserver so -# that it is ready for testing via Complement. 
-# -# Instructions for building this image from those it depends on is detailed in this guide: -# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse -FROM matrixdotorg/synapse-workers - -# Install postgresql -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 - -# Configure a user and create a database for Synapse -RUN pg_ctlcluster 13 main start && su postgres -c "echo \ - \"ALTER USER postgres PASSWORD 'somesecret'; \ - CREATE DATABASE synapse \ - ENCODING 'UTF8' \ - LC_COLLATE='C' \ - LC_CTYPE='C' \ - template=template0;\" | psql" && pg_ctlcluster 13 main stop - -# Modify the shared homeserver config with postgres support, certificate setup -# and the disabling of rate-limiting -COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml - -WORKDIR /data - -COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf - -# Copy the entrypoint -COPY conf-workers/start-complement-synapse-workers.sh / - -# Expose nginx's listener ports -EXPOSE 8008 8448 - -ENTRYPOINT ["/start-complement-synapse-workers.sh"] - -# Update the healthcheck to have a shorter check interval -HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh deleted file mode 100755 index b7e2444000..0000000000 --- a/docker/complement/conf-workers/start-complement-synapse-workers.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement - -set -e - -function log { - d=$(date +"%Y-%m-%d %H:%M:%S,%3N") - echo "$d $@" -} - -# Set the server name of the homeserver -export SYNAPSE_SERVER_NAME=${SERVER_NAME} - -# No need to report stats here -export 
SYNAPSE_REPORT_STATS=no - -# Set postgres authentication details which will be placed in the homeserver config file -export POSTGRES_PASSWORD=somesecret -export POSTGRES_USER=postgres -export POSTGRES_HOST=localhost - -# Specify the workers to test with -export SYNAPSE_WORKER_TYPES="\ - event_persister, \ - event_persister, \ - background_worker, \ - frontend_proxy, \ - event_creator, \ - user_dir, \ - media_repository, \ - federation_inbound, \ - federation_reader, \ - federation_sender, \ - synchrotron, \ - appservice, \ - pusher" - -# Add Complement's appservice registration directory, if there is one -# (It can be absent when there are no application services in this test!) -if [ -d /complement/appservice ]; then - export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice -fi - -# Generate a TLS key, then generate a certificate by having Complement's CA sign it -# Note that both the key and certificate are in PEM format (not DER). -openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" - -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -export SYNAPSE_TLS_CERT=/conf/server.tls.crt -export SYNAPSE_TLS_KEY=/conf/server.tls.key - -# Run the script that writes the necessary config files and starts supervisord, which in turn -# starts everything else -exec /configure_workers_and_start.py diff --git a/docker/complement/conf/homeserver.yaml b/docker/complement/conf/homeserver.yaml deleted file mode 100644 index e2be540bbb..0000000000 --- a/docker/complement/conf/homeserver.yaml +++ /dev/null @@ -1,129 +0,0 @@ -## Server ## - -server_name: SERVER_NAME -log_config: /conf/log_config.yaml -report_stats: False -signing_key_path: /conf/server.signing.key -trusted_key_servers: [] -enable_registration: true -enable_registration_without_verification: true - -## Listeners ## - 
-tls_certificate_path: /conf/server.tls.crt -tls_private_key_path: /conf/server.tls.key -bcrypt_rounds: 4 -registration_shared_secret: complement - -listeners: - - port: 8448 - bind_addresses: ['::'] - type: http - tls: true - resources: - - names: [federation] - - - port: 8008 - bind_addresses: ['::'] - type: http - - resources: - - names: [client] - -## Database ## - -database: - name: "sqlite3" - args: - # We avoid /data, as it is a volume and is not transferred when the container is committed, - # which is a fundamental necessity in complement. - database: "/conf/homeserver.db" - -## Federation ## - -# trust certs signed by the complement CA -federation_custom_ca_list: -- /complement/ca/ca.crt - -# unblacklist RFC1918 addresses -ip_range_blacklist: [] - -# Disable server rate-limiting -rc_federation: - window_size: 1000 - sleep_limit: 10 - sleep_delay: 500 - reject_limit: 99999 - concurrent: 3 - -rc_message: - per_second: 9999 - burst_count: 9999 - -rc_registration: - per_second: 9999 - burst_count: 9999 - -rc_login: - address: - per_second: 9999 - burst_count: 9999 - account: - per_second: 9999 - burst_count: 9999 - failed_attempts: - per_second: 9999 - burst_count: 9999 - -rc_admin_redaction: - per_second: 9999 - burst_count: 9999 - -rc_joins: - local: - per_second: 9999 - burst_count: 9999 - remote: - per_second: 9999 - burst_count: 9999 - -rc_3pid_validation: - per_second: 1000 - burst_count: 1000 - -rc_invites: - per_room: - per_second: 1000 - burst_count: 1000 - per_user: - per_second: 1000 - burst_count: 1000 - -federation_rr_transactions_per_room_per_second: 9999 - -## API Configuration ## - -# A list of application service config files to use -# -app_service_config_files: -AS_REGISTRATION_FILES - -## Experimental Features ## - -experimental_features: - # Enable spaces support - spaces_enabled: true - # Enable history backfilling support - msc2716_enabled: true - # server-side support for partial state in /send_join responses - msc3706_enabled: true - # 
client-side support for partial state in /send_join responses - faster_joins: true - # Enable jump to date endpoint - msc3030_enabled: true - -server_notices: - system_mxid_localpart: _server - system_mxid_display_name: "Server Alert" - system_mxid_avatar_url: "" - room_name: "Server Alert" diff --git a/docker/complement/conf/log_config.yaml b/docker/complement/conf/log_config.yaml deleted file mode 100644 index c33fd6cd00..0000000000 --- a/docker/complement/conf/log_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -version: 1 - -formatters: - precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' - -filters: - context: - (): synapse.logging.context.LoggingContextFilter - request: "" - -handlers: - console: - class: logging.StreamHandler - formatter: precise - filters: [context] - # log to stdout, for easier use with 'docker logs' - stream: 'ext://sys.stdout' - -root: - level: INFO - handlers: [console] - -disable_existing_loggers: false diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf index 5608342d1a..5dae3e6330 100644 --- a/docker/complement/conf-workers/postgres.supervisord.conf +++ b/docker/complement/conf/postgres.supervisord.conf @@ -1,6 +1,9 @@ [program:postgres] command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground +# Only start if START_POSTGRES=1 +autostart=%(ENV_START_POSTGRES)s + # Lower priority number = starts first priority=1 diff --git a/docker/complement/conf/start.sh b/docker/complement/conf/start.sh deleted file mode 100755 index 5d8d0fe016..0000000000 --- a/docker/complement/conf/start.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -set -e - -sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml - -# Add the application service registration files to the homeserver.yaml config -for filename in /complement/appservice/*.yaml; do - [ -f "$filename" ] || break - - as_id=$(basename "$filename" .yaml) - 
- # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after - # so we can add the next application service in the next iteration of this for loop - sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml -done -# Remove the AS_REGISTRATION_FILES entry -sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml - -# generate an ssl key and cert for the server, signed by the complement CA -openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@" - diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh new file mode 100755 index 0000000000..b9c97ab687 --- /dev/null +++ b/docker/complement/conf/start_for_complement.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# +# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement + +set -e + +echo "Complement Synapse launcher" +echo " Args: $@" +echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS" + +function log { + d=$(date +"%Y-%m-%d %H:%M:%S,%3N") + echo "$d $@" +} + +# Set the server name of the homeserver +export SYNAPSE_SERVER_NAME=${SERVER_NAME} + +# No need to report stats here +export SYNAPSE_REPORT_STATS=no + + +case "$SYNAPSE_COMPLEMENT_DATABASE" in + postgres) + # Set postgres authentication details which will be placed in the homeserver config file + export POSTGRES_PASSWORD=somesecret + export POSTGRES_USER=postgres + export POSTGRES_HOST=localhost + + # configure supervisord to start postgres + export START_POSTGRES=true + ;; + + sqlite) + # Configure 
supervisord not to start Postgres, as we don't need it + export START_POSTGRES=false + ;; + + *) + echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2 + exit 1 + ;; +esac + + +if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then + # Specify the workers to test with + export SYNAPSE_WORKER_TYPES="\ + event_persister, \ + event_persister, \ + background_worker, \ + frontend_proxy, \ + event_creator, \ + user_dir, \ + media_repository, \ + federation_inbound, \ + federation_reader, \ + federation_sender, \ + synchrotron, \ + appservice, \ + pusher" +else + # Empty string here means 'main process only' + export SYNAPSE_WORKER_TYPES="" +fi + + +# Add Complement's appservice registration directory, if there is one +# (It can be absent when there are no application services in this test!) +if [ -d /complement/appservice ]; then + export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice +fi + +# Generate a TLS key, then generate a certificate by having Complement's CA sign it +# Note that both the key and certificate are in PEM format (not DER). 
+openssl genrsa -out /conf/server.tls.key 2048 + +openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ + -subj "/CN=${SERVER_NAME}" + +openssl x509 -req -in /conf/server.tls.csr \ + -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ + -out /conf/server.tls.crt + +export SYNAPSE_TLS_CERT=/conf/server.tls.crt +export SYNAPSE_TLS_KEY=/conf/server.tls.key + +# Run the script that writes the necessary config files and starts supervisord, which in turn +# starts everything else +exec /configure_workers_and_start.py diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf/workers-shared-extra.yaml.j2 index cd7b50c65c..a5b1b6bb8b 100644 --- a/docker/complement/conf-workers/workers-shared.yaml +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -1,3 +1,11 @@ +{# + This file extends the default 'shared' configuration file (from the 'synapse-workers' + docker image) with Complement-specific tweak. + + The base configuration is moved out of the default path to `shared-orig.yaml.j2` + in the Complement Dockerfile and below we include that original file. 
+#} + ## Server ## report_stats: False trusted_key_servers: [] @@ -76,10 +84,16 @@ federation_rr_transactions_per_room_per_second: 9999 ## Experimental Features ## experimental_features: - # Enable history backfilling support - msc2716_enabled: true # Enable spaces support spaces_enabled: true + # Enable history backfilling support + msc2716_enabled: true + # server-side support for partial state in /send_join responses + msc3706_enabled: true + {% if not workers_in_use %} + # client-side support for partial state in /send_join responses + faster_joins: true + {% endif %} # Enable jump to date endpoint msc3030_enabled: true @@ -88,3 +102,5 @@ server_notices: system_mxid_display_name: "Server Alert" system_mxid_avatar_url: "" room_name: "Server Alert" + +{% include "shared-orig.yaml.j2" %} diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2 index 644ed788f3..92d25386dc 100644 --- a/docker/conf-workers/shared.yaml.j2 +++ b/docker/conf-workers/shared.yaml.j2 @@ -3,8 +3,10 @@ # configure_workers_and_start.py uses and amends to this file depending on the workers # that have been selected. 
+{% if enable_redis %} redis: enabled: true +{% endif %} {% if appservice_registrations is not none %} ## Application Services ## diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index ca1f7aef8e..7afab05133 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -28,6 +28,9 @@ stderr_logfile_maxbytes=0 username=redis autorestart=true +# Redis can be disabled if the image is being used without workers +autostart={{ enable_redis }} + [program:synapse_main] command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml priority=10 @@ -41,4 +44,4 @@ autorestart=unexpected exitcodes=0 # Additional process blocks -{{ worker_config }} \ No newline at end of file +{{ worker_config }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index f7dac90222..64697e0354 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -37,8 +37,8 @@ import sys from pathlib import Path from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set -import jinja2 import yaml +from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 @@ -236,12 +236,13 @@ def convert(src: str, dst: str, **template_vars: object) -> None: template_vars: The arguments to replace placeholder variables in the template with. """ # Read the template file - with open(src) as infile: - template = infile.read() + # We disable autoescape to prevent template variables from being escaped, + # as we're not using HTML. + env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False) + template = env.get_template(os.path.basename(src)) - # Generate a string from the template. We disable autoescape to prevent template - # variables from being escaped. 
- rendered = jinja2.Template(template, autoescape=False).render(**template_vars) + # Generate a string from the template. + rendered = template.render(**template_vars) # Write the generated contents to a file # @@ -378,8 +379,8 @@ def generate_worker_files( nginx_locations = {} # Read the desired worker configuration from the environment - worker_types_env = environ.get("SYNAPSE_WORKER_TYPES") - if worker_types_env is None: + worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() + if not worker_types_env: # No workers, just the main process worker_types = [] else: @@ -506,12 +507,16 @@ def generate_worker_files( if reg_path.suffix.lower() in (".yaml", ".yml") ] + workers_in_use = len(worker_types) > 0 + # Shared homeserver config convert( "/conf/shared.yaml.j2", "/conf/workers/shared.yaml", shared_worker_config=yaml.dump(shared_config), appservice_registrations=appservice_registrations, + enable_redis=workers_in_use, + workers_in_use=workers_in_use, ) # Nginx config @@ -531,6 +536,7 @@ def generate_worker_files( "/etc/supervisor/supervisord.conf", main_config_path=config_path, worker_config=supervisord_config, + enable_redis=workers_in_use, ) # healthcheck config diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml index 09e86ca0ca..2355337e6d 100644 --- a/docs/.sample_config_header.yaml +++ b/docs/.sample_config_header.yaml @@ -1,26 +1,12 @@ # This file is maintained as an up-to-date snapshot of the default -# homeserver.yaml configuration generated by Synapse. -# -# It is intended to act as a reference for the default configuration, -# helping admins keep track of new options and other changes, and compare -# their configs with the current default. As such, many of the actual -# config values shown are placeholders. +# homeserver.yaml configuration generated by Synapse. 
You can find a +# complete accounting of possible configuration options at +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html # # It is *not* intended to be copied and used as the basis for a real # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in # https://matrix-org.github.io/synapse/latest/setup/installation.html. - -# Configuration options that take a time period can be set using a number -# followed by a letter. Letters have the following meanings: -# s = second -# m = minute -# h = hour -# d = day -# w = week -# y = year -# For example, setting redaction_retention_period: 5m would remove redacted -# messages from the database after 5 minutes, rather than 5 months. - +# ################################################################################ diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8400a6539a..d7cf2df112 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -88,6 +88,7 @@ - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) + - [Dependency management](development/dependencies.md) - [Synapse Architecture]() - [Cancellation](development/synapse_architecture/cancellation.md) - [Log Contexts](log_contexts.md) diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 2b3714df66..c2f04a3905 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -304,6 +304,11 @@ To run a specific test, you can specify the whole name structure: COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order ``` +The above will run a monolithic (single-process) Synapse with SQLite as the database. 
For other configurations, try: + +- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead. +- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres. + ### Access database for homeserver after Complement test runs. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index ad35e667ed..50969edd46 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -38,15 +38,13 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool +async def user_may_join_room(user: str, room: str, is_invited: bool) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when a user is trying to join a room. The module must return a `bool` to indicate -whether the user can join the room. Return `False` to prevent the user from joining the -room; otherwise return `True` to permit the joining. - -The user is represented by their Matrix user ID (e.g. +Called when a user is trying to join a room. The user is represented by their Matrix user ID (e.g. `@alice:example.com`) and the room is represented by its Matrix ID (e.g. `!room:example.com`). The module is also given a boolean to indicate whether the user currently has a pending invite in the room. @@ -54,46 +52,67 @@ currently has a pending invite in the room. This callback isn't called if the join is performed by a server administrator, or in the context of a room creation. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. 
+ - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. ### `user_may_invite` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool +async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing an invitation. The module must return a `bool` indicating whether -the inviter can invite the invitee to the given room. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). Return `False` to prevent -the invitation; otherwise return `True` to permit it. +Called when processing an invitation. Both inviter and invitee are +represented by their Matrix user ID (e.g. `@alice:example.com`). + + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. 
Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `user_may_send_3pid_invite` _First introduced in Synapse v1.45.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python async def user_may_send_3pid_invite( inviter: str, medium: str, address: str, room_id: str, -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` Called when processing an invitation using a third-party identifier (also called a 3PID, -e.g. an email address or a phone number). The module must return a `bool` indicating -whether the inviter can invite the invitee to the given room. Return `False` to prevent -the invitation; otherwise return `True` to permit it. +e.g. an email address or a phone number). The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. 
"email") and its address @@ -115,63 +134,108 @@ await user_may_send_3pid_invite( **Note**: If the third-party identifier is already associated with a matrix user ID, [`user_may_invite`](#user_may_invite) will be used instead. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `user_may_create_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room(user: str) -> bool +async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing a room creation request. The module must return a `bool` indicating -whether the given user (represented by their Matrix user ID) is allowed to create a room. 
-Return `False` to prevent room creation; otherwise return `True` to permit it. +Called when processing a room creation request. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_create_room_alias` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool +async def user_may_create_room_alias(user_id: str, room_alias: "synapse.module_api.RoomAlias") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to associate an alias with an existing room. The module must return a -`bool` indicating whether the given user (represented by their Matrix user ID) is allowed -to set the given alias. 
Return `False` to prevent the alias creation; otherwise return -`True` to permit it. +Called when trying to associate an alias with an existing room. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_publish_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_publish_room(user: str, room_id: str) -> bool +async def user_may_publish_room(user_id: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to publish a room to the homeserver's public rooms directory. The -module must return a `bool` indicating whether the given user (represented by their -Matrix user ID) is allowed to publish the given room. 
Return `False` to prevent the -room from being published; otherwise return `True` to permit its publication. +Called when trying to publish a room to the homeserver's public rooms directory. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `check_username_for_spam` @@ -239,21 +303,32 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python async def check_media_file_for_spam( file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper", file_info: "synapse.rest.media.v1._base.FileInfo", -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when storing a local or remote file. 
The module must return a `bool` indicating -whether the given file should be excluded from the homeserver's media store. Return -`True` to prevent this file from being stored; otherwise return `False`. +Called when storing a local or remote file. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `False`, Synapse falls through to the next one. The value of the first -callback that does not return `False` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. 
+ ### `should_drop_federated_event` @@ -316,6 +391,9 @@ class ListSpamChecker: resource=IsUserEvilResource(config), ) - async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[bool, str]: - return event.sender not in self.evil_users + async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[Literal["NOT_SPAM"], Codes]: + if event.sender in self.evil_users: + return Codes.FORBIDDEN + else: + return synapse.module_api.NOT_SPAM ``` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 56a25c534f..6578ec0229 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1,27 +1,13 @@ # This file is maintained as an up-to-date snapshot of the default -# homeserver.yaml configuration generated by Synapse. -# -# It is intended to act as a reference for the default configuration, -# helping admins keep track of new options and other changes, and compare -# their configs with the current default. As such, many of the actual -# config values shown are placeholders. +# homeserver.yaml configuration generated by Synapse. You can find a +# complete accounting of possible configuration options at +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html # # It is *not* intended to be copied and used as the basis for a real # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in # https://matrix-org.github.io/synapse/latest/setup/installation.html. - -# Configuration options that take a time period can be set using a number -# followed by a letter. Letters have the following meanings: -# s = second -# m = minute -# h = hour -# d = day -# w = week -# y = year -# For example, setting redaction_retention_period: 5m would remove redacted -# messages from the database after 5 minutes, rather than 5 months. 
- +# ################################################################################ # Configuration file for Synapse. @@ -31,2825 +17,27 @@ # should have the same indentation. # # [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html - - -## Modules ## - -# Server admins can expand Synapse's functionality with external modules. -# -# See https://matrix-org.github.io/synapse/latest/modules/index.html for more -# documentation on how to configure or create custom modules for Synapse. -# -modules: - #- module: my_super_module.MySuperClass - # config: - # do_thing: true - #- module: my_other_super_module.SomeClass - # config: {} - - -## Server ## - -# The public-facing domain of the server -# -# The server_name name will appear at the end of usernames and room addresses -# created on this server. For example if the server_name was example.com, -# usernames on this server would be in the format @user:example.com -# -# In most cases you should avoid using a matrix specific subdomain such as -# matrix.example.com or synapse.example.com as the server_name for the same -# reasons you wouldn't use user@email.example.com as your email address. -# See https://matrix-org.github.io/synapse/latest/delegate.html -# for information on how to host Synapse on a subdomain while preserving -# a clean server_name. -# -# The server_name cannot be changed later so it is important to -# configure this correctly before you start Synapse. It should be all -# lowercase and may contain an explicit port. 
-# Examples: matrix.org, localhost:8080 # +# For more information on how to configure Synapse, including a complete accounting of +# each option, go to docs/usage/configuration/config_documentation.md or +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html server_name: "SERVERNAME" - -# When running as a daemon, the file to store the pid in -# pid_file: DATADIR/homeserver.pid - -# The absolute URL to the web client which / will redirect to. -# -#web_client_location: https://riot.example.com/ - -# The public-facing base URL that clients use to access this Homeserver (not -# including _matrix/...). This is the same URL a user might enter into the -# 'Custom Homeserver URL' field on their client. If you use Synapse with a -# reverse proxy, this should be the URL to reach Synapse via the proxy. -# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see -# 'listeners' below). -# -# Defaults to 'https://<server_name>/'. -# -#public_baseurl: https://example.com/ - -# Uncomment the following to tell other servers to send federation traffic on -# port 443. -# -# By default, other servers will try to reach our server on port 8448, which can -# be inconvenient in some environments. -# -# Provided 'https://<server_name>/' on port 443 is routed to Synapse, this -# option configures Synapse to serve a file at -# 'https://<server_name>/.well-known/matrix/server'. This will tell other -# servers to send traffic to port 443 instead. -# -# See https://matrix-org.github.io/synapse/latest/delegate.html for more -# information. -# -# Defaults to 'false'. -# -#serve_server_wellknown: true - -# Set the soft limit on the number of file descriptors synapse can use -# Zero is used to indicate synapse should set the soft limit to the -# hard limit. -# -#soft_file_limit: 0 - -# Presence tracking allows users to see the state (e.g online/offline) -# of other local and remote users. 
-# -presence: - # Uncomment to disable presence tracking on this homeserver. This option - # replaces the previous top-level 'use_presence' option. - # - #enabled: false - -# Whether to require authentication to retrieve profile data (avatars, -# display names) of other users through the client API. Defaults to -# 'false'. Note that profile data is also available via the federation -# API, unless allow_profile_lookup_over_federation is set to false. -# -#require_auth_for_profile_requests: true - -# Uncomment to require a user to share a room with another user in order -# to retrieve their profile information. Only checked on Client-Server -# requests. Profile requests from other servers should be checked by the -# requesting server. Defaults to 'false'. -# -#limit_profile_requests_to_users_who_share_rooms: true - -# Uncomment to prevent a user's profile data from being retrieved and -# displayed in a room until they have joined it. By default, a user's -# profile data is included in an invite event, regardless of the values -# of the above two settings, and whether or not the users share a server. -# Defaults to 'true'. -# -#include_profile_data_on_invite: false - -# If set to 'true', removes the need for authentication to access the server's -# public rooms directory through the client API, meaning that anyone can -# query the room directory. Defaults to 'false'. -# -#allow_public_rooms_without_auth: true - -# If set to 'true', allows any other homeserver to fetch the server's public -# rooms directory via federation. Defaults to 'false'. -# -#allow_public_rooms_over_federation: true - -# The default room version for newly created rooms. -# -# Known room versions are listed here: -# https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions -# -# For example, for room version 1, default_room_version should be set -# to "1". 
-# -#default_room_version: "9" - -# The GC threshold parameters to pass to `gc.set_threshold`, if defined -# -#gc_thresholds: [700, 10, 10] - -# The minimum time in seconds between each GC for a generation, regardless of -# the GC thresholds. This ensures that we don't do GC too frequently. -# -# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive -# generation 0 GCs, etc. -# -# Defaults to `[1s, 10s, 30s]`. -# -#gc_min_interval: [0.5s, 30s, 1m] - -# Set the limit on the returned events in the timeline in the get -# and sync operations. The default value is 100. -1 means no upper limit. -# -# Uncomment the following to increase the limit to 5000. -# -#filter_timeline_limit: 5000 - -# Whether room invites to users on this server should be blocked -# (except those sent by local server admins). The default is False. -# -#block_non_admin_invites: true - -# Room searching -# -# If disabled, new messages will not be indexed for searching and users -# will receive errors when searching for messages. Defaults to enabled. -# -#enable_search: false - -# Prevent outgoing requests from being sent to the following blacklisted IP address -# CIDR ranges. If this option is not specified then it defaults to private IP -# address ranges (see the example below). -# -# The blacklist applies to the outbound requests for federation, identity servers, -# push servers, and for checking key validity for third-party invite events. -# -# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly -# listed here, since they correspond to unroutable addresses.) -# -# This option replaces federation_ip_range_blacklist in Synapse v1.25.0. 
-# -# Note: The value is ignored when an HTTP proxy is in use -# -#ip_range_blacklist: -# - '127.0.0.0/8' -# - '10.0.0.0/8' -# - '172.16.0.0/12' -# - '192.168.0.0/16' -# - '100.64.0.0/10' -# - '192.0.0.0/24' -# - '169.254.0.0/16' -# - '192.88.99.0/24' -# - '198.18.0.0/15' -# - '192.0.2.0/24' -# - '198.51.100.0/24' -# - '203.0.113.0/24' -# - '224.0.0.0/4' -# - '::1/128' -# - 'fe80::/10' -# - 'fc00::/7' -# - '2001:db8::/32' -# - 'ff00::/8' -# - 'fec0::/10' - -# List of IP address CIDR ranges that should be allowed for federation, -# identity servers, push servers, and for checking key validity for -# third-party invite events. This is useful for specifying exceptions to -# wide-ranging blacklisted target IP ranges - e.g. for communication with -# a push server only visible in your network. -# -# This whitelist overrides ip_range_blacklist and defaults to an empty -# list. -# -#ip_range_whitelist: -# - '192.168.1.1' - -# List of ports that Synapse should listen on, their purpose and their -# configuration. -# -# Options for each listener include: -# -# port: the TCP port to bind to -# -# bind_addresses: a list of local addresses to listen on. The default is -# 'all local interfaces'. -# -# type: the type of listener. Normally 'http', but other valid options are: -# 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html), -# 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html), -# 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html). -# -# tls: set to true to enable TLS for this listener. Will use the TLS -# key/cert specified in tls_private_key_path / tls_certificate_path. -# -# x_forwarded: Only valid for an 'http' listener. Set to true to use the -# X-Forwarded-For header as the client IP. Useful when Synapse is -# behind a reverse-proxy. -# -# resources: Only valid for an 'http' listener. A list of resources to host -# on this port. 
Options for each resource are: -# -# names: a list of names of HTTP resources. See below for a list of -# valid resource names. -# -# compress: set to true to enable HTTP compression for this resource. -# -# additional_resources: Only valid for an 'http' listener. A map of -# additional endpoints which should be loaded via dynamic modules. -# -# Valid resource names are: -# -# client: the client-server API (/_matrix/client), and the synapse admin -# API (/_synapse/admin). Also implies 'media' and 'static'. -# -# consent: user consent forms (/_matrix/consent). -# See https://matrix-org.github.io/synapse/latest/consent_tracking.html. -# -# federation: the server-server API (/_matrix/federation). Also implies -# 'media', 'keys', 'openid' -# -# keys: the key discovery API (/_matrix/key). -# -# media: the media API (/_matrix/media). -# -# metrics: the metrics interface. -# See https://matrix-org.github.io/synapse/latest/metrics-howto.html. -# -# openid: OpenID authentication. -# -# replication: the HTTP replication API (/_synapse/replication). -# See https://matrix-org.github.io/synapse/latest/workers.html. -# -# static: static resources under synapse/static (/_matrix/static). (Mostly -# useful for 'fallback authentication'.) -# listeners: - # TLS-enabled listener: for when matrix traffic is sent directly to synapse. - # - # Disabled by default. To enable it, uncomment the following. (Note that you - # will also need to give Synapse a TLS key and certificate: see the TLS section - # below.) - # - #- port: 8448 - # type: http - # tls: true - # resources: - # - names: [client, federation] - - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy - # that unwraps TLS. - # - # If you plan to use a reverse proxy, please see - # https://matrix-org.github.io/synapse/latest/reverse_proxy.html. 
- # - port: 8008 tls: false type: http x_forwarded: true bind_addresses: ['::1', '127.0.0.1'] - resources: - names: [client, federation] compress: false - - # example additional_resources: - # - #additional_resources: - # "/_matrix/my/custom/endpoint": - # module: my_module.CustomRequestHandler - # config: {} - - # Turn on the twisted ssh manhole service on localhost on the given - # port. - # - #- port: 9000 - # bind_addresses: ['::1', '127.0.0.1'] - # type: manhole - -# Connection settings for the manhole -# -manhole_settings: - # The username for the manhole. This defaults to 'matrix'. - # - #username: manhole - - # The password for the manhole. This defaults to 'rabbithole'. - # - #password: mypassword - - # The private and public SSH key pair used to encrypt the manhole traffic. - # If these are left unset, then hardcoded and non-secret keys are used, - # which could allow traffic to be intercepted if sent over a public network. - # - #ssh_priv_key_path: CONFDIR/id_rsa - #ssh_pub_key_path: CONFDIR/id_rsa.pub - -# Forward extremities can build up in a room due to networking delays between -# homeservers. Once this happens in a large room, calculation of the state of -# that room can become quite expensive. To mitigate this, once the number of -# forward extremities reaches a given threshold, Synapse will send an -# org.matrix.dummy_event event, which will reduce the forward extremities -# in the room. -# -# This setting defines the threshold (i.e. number of forward extremities in the -# room) at which dummy events are sent. The default value is 10. 
-# -#dummy_events_threshold: 5 - - -## Homeserver blocking ## - -# How to reach the server admin, used in ResourceLimitError -# -#admin_contact: 'mailto:admin@server.com' - -# Global blocking -# -#hs_disabled: false -#hs_disabled_message: 'Human readable reason for why the HS is blocked' - -# Monthly Active User Blocking -# -# Used in cases where the admin or server owner wants to limit to the -# number of monthly active users. -# -# 'limit_usage_by_mau' disables/enables monthly active user blocking. When -# enabled and a limit is reached the server returns a 'ResourceLimitError' -# with error type Codes.RESOURCE_LIMIT_EXCEEDED -# -# 'max_mau_value' is the hard limit of monthly active users above which -# the server will start blocking user actions. -# -# 'mau_trial_days' is a means to add a grace period for active users. It -# means that users must be active for this number of days before they -# can be considered active and guards against the case where lots of users -# sign up in a short space of time never to return after their initial -# session. -# -# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but -# applies a different trial number if the user was registered by an appservice. -# A value of 0 means no trial days are applied. Appservices not listed in this -# dictionary use the value of `mau_trial_days` instead. -# -# 'mau_limit_alerting' is a means of limiting client side alerting -# should the mau limit be reached. This is useful for small instances -# where the admin has 5 mau seats (say) for 5 specific people and no -# interest increasing the mau limit further. Defaults to True, which -# means that alerting is enabled -# -#limit_usage_by_mau: false -#max_mau_value: 50 -#mau_trial_days: 2 -#mau_limit_alerting: false -#mau_appservice_trial_days: -# "appservice-id": 1 - -# If enabled, the metrics for the number of monthly active users will -# be populated, however no one will be limited. 
If limit_usage_by_mau -# is true, this is implied to be true. -# -#mau_stats_only: false - -# Sometimes the server admin will want to ensure certain accounts are -# never blocked by mau checking. These accounts are specified here. -# -#mau_limit_reserved_threepids: -# - medium: 'email' -# address: 'reserved_user@example.com' - -# Used by phonehome stats to group together related servers. -#server_context: context - -# Resource-constrained homeserver settings -# -# When this is enabled, the room "complexity" will be checked before a user -# joins a new remote room. If it is above the complexity limit, the server will -# disallow joining, or will instantly leave. -# -# Room complexity is an arbitrary measure based on factors such as the number of -# users in the room. -# -limit_remote_rooms: - # Uncomment to enable room complexity checking. - # - #enabled: true - - # the limit above which rooms cannot be joined. The default is 1.0. - # - #complexity: 0.5 - - # override the error which is returned when the room is too complex. - # - #complexity_error: "This room is too complex." - - # allow server admins to join complex rooms. Default is false. - # - #admins_can_join: true - -# Whether to require a user to be in the room to add an alias to it. -# Defaults to 'true'. -# -#require_membership_for_aliases: false - -# Whether to allow per-room membership profiles through the send of membership -# events with profile information that differ from the target's global profile. -# Defaults to 'true'. -# -#allow_per_room_profiles: false - -# The largest allowed file size for a user avatar. Defaults to no restriction. -# -# Note that user avatar changes will not work if this is set without -# using Synapse's media repository. -# -#max_avatar_size: 10M - -# The MIME types allowed for user avatars. Defaults to no restriction. -# -# Note that user avatar changes will not work if this is set without -# using Synapse's media repository. 
-# -#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] - -# How long to keep redacted events in unredacted form in the database. After -# this period redacted events get replaced with their redacted form in the DB. -# -# Defaults to `7d`. Set to `null` to disable. -# -#redaction_retention_period: 28d - -# How long to track users' last seen time and IPs in the database. -# -# Defaults to `28d`. Set to `null` to disable clearing out of old rows. -# -#user_ips_max_age: 14d - -# Inhibits the /requestToken endpoints from returning an error that might leak -# information about whether an e-mail address is in use or not on this -# homeserver. -# Note that for some endpoints the error situation is the e-mail already being -# used, and for others the error is entering the e-mail being unused. -# If this option is enabled, instead of returning an error, these endpoints will -# act as if no error happened and return a fake session ID ('sid') to clients. -# -#request_token_inhibit_3pid_errors: true - -# A list of domains that the domain portion of 'next_link' parameters -# must match. -# -# This parameter is optionally provided by clients while requesting -# validation of an email or phone number, and maps to a link that -# users will be automatically redirected to after validation -# succeeds. Clients can make use this parameter to aid the validation -# process. -# -# The whitelist is applied whether the homeserver or an -# identity server is handling validation. -# -# The default value is no whitelist functionality; all domains are -# allowed. Setting this value to an empty list will instead disallow -# all domains. -# -#next_link_domain_whitelist: ["matrix.org"] - -# Templates to use when generating email or HTML page contents. -# -templates: - # Directory in which Synapse will try to find template files to use to generate - # email or HTML page contents. 
- # If not set, or a file is not found within the template directory, a default - # template from within the Synapse package will be used. - # - # See https://matrix-org.github.io/synapse/latest/templates.html for more - # information about using custom templates. - # - #custom_template_directory: /path/to/custom/templates/ - -# List of rooms to exclude from sync responses. This is useful for server -# administrators wishing to group users into a room without these users being able -# to see it from their client. -# -# By default, no room is excluded. -# -#exclude_rooms_from_sync: -# - !foo:example.com - - -# Message retention policy at the server level. -# -# Room admins and mods can define a retention period for their rooms using the -# 'm.room.retention' state event, and server admins can cap this period by setting -# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. -# -# If this feature is enabled, Synapse will regularly look for and purge events -# which are older than the room's maximum retention period. Synapse will also -# filter events received over federation so that events that should have been -# purged are ignored and not stored again. -# -retention: - # The message retention policies feature is disabled by default. Uncomment the - # following line to enable it. - # - #enabled: true - - # Default retention policy. If set, Synapse will apply it to rooms that lack the - # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't - # matter much because Synapse doesn't take it into account yet. - # - #default_policy: - # min_lifetime: 1d - # max_lifetime: 1y - - # Retention policy limits. If set, and the state of a room contains a - # 'm.room.retention' event in its state which contains a 'min_lifetime' or a - # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy - # to these limits when running purge jobs. 
- # - #allowed_lifetime_min: 1d - #allowed_lifetime_max: 1y - - # Server admins can define the settings of the background jobs purging the - # events which lifetime has expired under the 'purge_jobs' section. - # - # If no configuration is provided, a single job will be set up to delete expired - # events in every room daily. - # - # Each job's configuration defines which range of message lifetimes the job - # takes care of. For example, if 'shortest_max_lifetime' is '2d' and - # 'longest_max_lifetime' is '3d', the job will handle purging expired events in - # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and - # lower than or equal to 3 days. Both the minimum and the maximum value of a - # range are optional, e.g. a job with no 'shortest_max_lifetime' and a - # 'longest_max_lifetime' of '3d' will handle every room with a retention policy - # which 'max_lifetime' is lower than or equal to three days. - # - # The rationale for this per-job configuration is that some rooms might have a - # retention policy with a low 'max_lifetime', where history needs to be purged - # of outdated messages on a more frequent basis than for the rest of the rooms - # (e.g. every 12h), but not want that purge to be performed by a job that's - # iterating over every room it knows, which could be heavy on the server. - # - # If any purge job is configured, it is strongly recommended to have at least - # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' - # set, or one job without 'shortest_max_lifetime' and one job without - # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if - # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a - # room's policy to these values is done after the policies are retrieved from - # Synapse's database (which is done using the range specified in a purge job's - # configuration). 
- # - #purge_jobs: - # - longest_max_lifetime: 3d - # interval: 12h - # - shortest_max_lifetime: 3d - # interval: 1d - - -## TLS ## - -# PEM-encoded X509 certificate for TLS. -# This certificate, as of Synapse 1.0, will need to be a valid and verifiable -# certificate, signed by a recognised Certificate Authority. -# -# Be sure to use a `.pem` file that includes the full certificate chain including -# any intermediate certificates (for instance, if using certbot, use -# `fullchain.pem` as your certificate, not `cert.pem`). -# -#tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" - -# PEM-encoded private key for TLS -# -#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" - -# Whether to verify TLS server certificates for outbound federation requests. -# -# Defaults to `true`. To disable certificate verification, uncomment the -# following line. -# -#federation_verify_certificates: false - -# The minimum TLS version that will be used for outbound federation requests. -# -# Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note -# that setting this value higher than `1.2` will prevent federation to most -# of the public Matrix network: only configure it to `1.3` if you have an -# entirely private federation setup and you can ensure TLS 1.3 support. -# -#federation_client_minimum_tls_version: 1.2 - -# Skip federation certificate verification on the following whitelist -# of domains. -# -# This setting should only be used in very specific cases, such as -# federation over Tor hidden services and similar. For private networks -# of homeservers, you likely want to use a private CA instead. -# -# Only effective if federation_verify_certicates is `true`. -# -#federation_certificate_verification_whitelist: -# - lon.example.com -# - "*.domain.com" -# - "*.onion" - -# List of custom certificate authorities for federation traffic. -# -# This setting should only normally be used within a private network of -# homeservers. 
-# -# Note that this list will replace those that are provided by your -# operating environment. Certificates must be in PEM format. -# -#federation_custom_ca_list: -# - myCA1.pem -# - myCA2.pem -# - myCA3.pem - - -## Federation ## - -# Restrict federation to the following whitelist of domains. -# N.B. we recommend also firewalling your federation listener to limit -# inbound federation traffic as early as possible, rather than relying -# purely on this application-layer restriction. If not specified, the -# default is to whitelist everything. -# -#federation_domain_whitelist: -# - lon.example.com -# - nyc.example.com -# - syd.example.com - -# Report prometheus metrics on the age of PDUs being sent to and received from -# the following domains. This can be used to give an idea of "delay" on inbound -# and outbound federation, though be aware that any delay can be due to problems -# at either end or with the intermediate network. -# -# By default, no domains are monitored in this way. -# -#federation_metrics_domains: -# - matrix.org -# - example.com - -# Uncomment to disable profile lookup over federation. By default, the -# Federation API allows other homeservers to obtain profile data of any user -# on this homeserver. Defaults to 'true'. -# -#allow_profile_lookup_over_federation: false - -# Uncomment to allow device display name lookup over federation. By default, the -# Federation API prevents other homeservers from obtaining the display names of -# user devices on this homeserver. Defaults to 'false'. -# -#allow_device_name_lookup_over_federation: true - - -## Caching ## - -# Caching can be configured through the following options. -# -# A cache 'factor' is a multiplier that can be applied to each of -# Synapse's caches in order to increase or decrease the maximum -# number of entries that can be stored. 
-# -# The configuration for cache factors (caches.global_factor and -# caches.per_cache_factors) can be reloaded while the application is running, -# by sending a SIGHUP signal to the Synapse process. Changes to other parts of -# the caching config will NOT be applied after a SIGHUP is received; a restart -# is necessary. - -# The number of events to cache in memory. Not affected by -# caches.global_factor. -# -#event_cache_size: 10K - -caches: - # Controls the global cache factor, which is the default cache factor - # for all caches if a specific factor for that cache is not otherwise - # set. - # - # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment - # variable. Setting by environment variable takes priority over - # setting through the config file. - # - # Defaults to 0.5, which will half the size of all caches. - # - #global_factor: 1.0 - - # A dictionary of cache name to cache factor for that individual - # cache. Overrides the global cache factor for a given cache. - # - # These can also be set through environment variables comprised - # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital - # letters and underscores. Setting by environment variable - # takes priority over setting through the config file. - # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0 - # - # Some caches have '*' and other characters that are not - # alphanumeric or underscores. These caches can be named with or - # without the special characters stripped. For example, to specify - # the cache factor for `*stateGroupCache*` via an environment - # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - # - per_cache_factors: - #get_users_who_share_room_with_user: 2.0 - - # Controls whether cache entries are evicted after a specified time - # period. Defaults to true. Uncomment to disable this feature. 
- # - #expire_caches: false - - # If expire_caches is enabled, this flag controls how long an entry can - # be in a cache without having been accessed before being evicted. - # Defaults to 30m. Uncomment to set a different time to live for cache entries. - # - #cache_entry_ttl: 30m - - # This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`, - # `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain - # a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize - # this option, and all three of the options must be specified for this feature to work. - #cache_autotuning: - # This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted. - # They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in - # the flag below, or until the `min_cache_ttl` is hit. - #max_cache_memory_usage: 1024M - - # This flag sets a rough target for the desired memory usage of the caches. - #target_cache_memory_usage: 758M - - # 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when - # caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - # from being emptied while Synapse is evicting due to memory. - #min_cache_ttl: 5m - - # Controls how long the results of a /sync request are cached for after - # a successful response is returned. A higher duration can help clients with - # intermittent connections, at the cost of higher memory usage. - # - # By default, this is zero, which means that sync responses are not cached - # at all. - # - #sync_response_cache_duration: 2m - - -## Database ## - -# The 'database' setting defines the database that synapse uses to store all of -# its data. 
-# -# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or -# 'psycopg2' (for PostgreSQL). -# -# 'txn_limit' gives the maximum number of transactions to run per connection -# before reconnecting. Defaults to 0, which means no limit. -# -# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to -# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) -# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information -# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here: -# https://wiki.postgresql.org/wiki/Locale_data_changes -# -# 'args' gives options which are passed through to the database engine, -# except for options starting 'cp_', which are used to configure the Twisted -# connection pool. For a reference to valid arguments, see: -# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect -# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS -# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ -# -# -# Example SQLite configuration: -# -#database: -# name: sqlite3 -# args: -# database: /path/to/homeserver.db -# -# -# Example Postgres configuration: -# -#database: -# name: psycopg2 -# txn_limit: 10000 -# args: -# user: synapse_user -# password: secretpassword -# database: synapse -# host: localhost -# port: 5432 -# cp_min: 5 -# cp_max: 10 -# -# For more information on using Synapse with Postgres, -# see https://matrix-org.github.io/synapse/latest/postgres.html. 
-# database: name: sqlite3 args: database: DATADIR/homeserver.db - - -## Logging ## - -# A yaml python logging config file as described by -# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema -# log_config: "CONFDIR/SERVERNAME.log.config" - - -## Ratelimiting ## - -# Ratelimiting settings for client actions (registration, login, messaging). -# -# Each ratelimiting configuration is made of two parameters: -# - per_second: number of requests a client can send per second. -# - burst_count: number of requests a client can send before being throttled. -# -# Synapse currently uses the following configurations: -# - one for messages that ratelimits sending based on the account the client -# is using -# - one for registration that ratelimits registration requests based on the -# client's IP address. -# - one for checking the validity of registration tokens that ratelimits -# requests based on the client's IP address. -# - one for login that ratelimits login requests based on the client's IP -# address. -# - one for login that ratelimits login requests based on the account the -# client is attempting to log into. -# - one for login that ratelimits login requests based on the account the -# client is attempting to log into, based on the amount of failed login -# attempts for this account. -# - one for ratelimiting redactions by room admins. If this is not explicitly -# set then it uses the same ratelimiting as per rc_message. This is useful -# to allow room admins to deal with abuse quickly. -# - two for ratelimiting number of rooms a user can join, "local" for when -# users are joining rooms the server is already in (this is cheap) vs -# "remote" for when users are trying to join rooms not on the server (which -# can be more expensive) -# - one for ratelimiting how often a user or IP can attempt to validate a 3PID. -# - two for ratelimiting how often invites can be sent in a room or to a -# specific user. 
-# - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID -# such as an email address or a phone number) based on the account that's -# sending the invite. -# -# The defaults are as shown below. -# -#rc_message: -# per_second: 0.2 -# burst_count: 10 -# -#rc_registration: -# per_second: 0.17 -# burst_count: 3 -# -#rc_registration_token_validity: -# per_second: 0.1 -# burst_count: 5 -# -#rc_login: -# address: -# per_second: 0.17 -# burst_count: 3 -# account: -# per_second: 0.17 -# burst_count: 3 -# failed_attempts: -# per_second: 0.17 -# burst_count: 3 -# -#rc_admin_redaction: -# per_second: 1 -# burst_count: 50 -# -#rc_joins: -# local: -# per_second: 0.1 -# burst_count: 10 -# remote: -# per_second: 0.01 -# burst_count: 10 -# -#rc_3pid_validation: -# per_second: 0.003 -# burst_count: 5 -# -#rc_invites: -# per_room: -# per_second: 0.3 -# burst_count: 10 -# per_user: -# per_second: 0.003 -# burst_count: 5 -# -#rc_third_party_invite: -# per_second: 0.2 -# burst_count: 10 - -# Ratelimiting settings for incoming federation -# -# The rc_federation configuration is made up of the following settings: -# - window_size: window size in milliseconds -# - sleep_limit: number of federation requests from a single server in -# a window before the server will delay processing the request. -# - sleep_delay: duration in milliseconds to delay processing events -# from remote servers by if they go over the sleep limit. -# - reject_limit: maximum number of concurrent federation requests -# allowed from a single server -# - concurrent: number of federation requests to concurrently process -# from a single server -# -# The defaults are as shown below. -# -#rc_federation: -# window_size: 1000 -# sleep_limit: 10 -# sleep_delay: 500 -# reject_limit: 50 -# concurrent: 3 - -# Target outgoing federation transaction frequency for sending read-receipts, -# per-room. -# -# If we end up trying to send out more read-receipts, they will get buffered up -# into fewer transactions. 
-# -#federation_rr_transactions_per_room_per_second: 50 - - - -## Media Store ## - -# Enable the media store service in the Synapse master. Uncomment the -# following if you are using a separate media store worker. -# -#enable_media_repo: false - -# Directory where uploaded images and attachments are stored. -# -media_store_path: "DATADIR/media_store" - -# Media storage providers allow media to be stored in different -# locations. -# -#media_storage_providers: -# - module: file_system -# # Whether to store newly uploaded local files -# store_local: false -# # Whether to store newly downloaded remote files -# store_remote: false -# # Whether to wait for successful storage for local uploads -# store_synchronous: false -# config: -# directory: /mnt/some/other/directory - -# The largest allowed upload size in bytes -# -# If you are using a reverse proxy you may also need to set this value in -# your reverse proxy's config. Notably Nginx has a small max body size by default. -# See https://matrix-org.github.io/synapse/latest/reverse_proxy.html. -# -#max_upload_size: 50M - -# Maximum number of pixels that will be thumbnailed -# -#max_image_pixels: 32M - -# Whether to generate new thumbnails on the fly to precisely match -# the resolution requested by the client. If true then whenever -# a new resolution is requested by the client the server will -# generate a new thumbnail. If false the server will pick a thumbnail -# from a precalculated list. -# -#dynamic_thumbnails: false - -# List of thumbnails to precalculate when an image is uploaded. -# -#thumbnail_sizes: -# - width: 32 -# height: 32 -# method: crop -# - width: 96 -# height: 96 -# method: crop -# - width: 320 -# height: 240 -# method: scale -# - width: 640 -# height: 480 -# method: scale -# - width: 800 -# height: 600 -# method: scale - -# Is the preview URL API enabled? -# -# 'false' by default: uncomment the following to enable it (and specify a -# url_preview_ip_range_blacklist blacklist). 
-# -#url_preview_enabled: true - -# List of IP address CIDR ranges that the URL preview spider is denied -# from accessing. There are no defaults: you must explicitly -# specify a list for URL previewing to work. You should specify any -# internal services in your network that you do not want synapse to try -# to connect to, otherwise anyone in any Matrix room could cause your -# synapse to issue arbitrary GET requests to your internal services, -# causing serious security issues. -# -# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly -# listed here, since they correspond to unroutable addresses.) -# -# This must be specified if url_preview_enabled is set. It is recommended that -# you uncomment the following list as a starting point. -# -# Note: The value is ignored when an HTTP proxy is in use -# -#url_preview_ip_range_blacklist: -# - '127.0.0.0/8' -# - '10.0.0.0/8' -# - '172.16.0.0/12' -# - '192.168.0.0/16' -# - '100.64.0.0/10' -# - '192.0.0.0/24' -# - '169.254.0.0/16' -# - '192.88.99.0/24' -# - '198.18.0.0/15' -# - '192.0.2.0/24' -# - '198.51.100.0/24' -# - '203.0.113.0/24' -# - '224.0.0.0/4' -# - '::1/128' -# - 'fe80::/10' -# - 'fc00::/7' -# - '2001:db8::/32' -# - 'ff00::/8' -# - 'fec0::/10' - -# List of IP address CIDR ranges that the URL preview spider is allowed -# to access even if they are specified in url_preview_ip_range_blacklist. -# This is useful for specifying exceptions to wide-ranging blacklisted -# target IP ranges - e.g. for enabling URL previews for a specific private -# website only visible in your network. -# -#url_preview_ip_range_whitelist: -# - '192.168.1.1' - -# Optional list of URL matches that the URL preview spider is -# denied from accessing. You should use url_preview_ip_range_blacklist -# in preference to this, otherwise someone could define a public DNS -# entry that points to a private IP address and circumvent the blacklist. 
-# This is more useful if you know there is an entire shape of URL that -# you know that will never want synapse to try to spider. -# -# Each list entry is a dictionary of url component attributes as returned -# by urlparse.urlsplit as applied to the absolute form of the URL. See -# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit -# The values of the dictionary are treated as an filename match pattern -# applied to that component of URLs, unless they start with a ^ in which -# case they are treated as a regular expression match. If all the -# specified component matches for a given list item succeed, the URL is -# blacklisted. -# -#url_preview_url_blacklist: -# # blacklist any URL with a username in its URI -# - username: '*' -# -# # blacklist all *.google.com URLs -# - netloc: 'google.com' -# - netloc: '*.google.com' -# -# # blacklist all plain HTTP URLs -# - scheme: 'http' -# -# # blacklist http(s)://www.acme.com/foo -# - netloc: 'www.acme.com' -# path: '/foo' -# -# # blacklist any URL with a literal IPv4 address -# - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' - -# The largest allowed URL preview spidering size in bytes -# -#max_spider_size: 10M - -# A list of values for the Accept-Language HTTP header used when -# downloading webpages during URL preview generation. This allows -# Synapse to specify the preferred languages that URL previews should -# be in when communicating with remote servers. -# -# Each value is a IETF language tag; a 2-3 letter identifier for a -# language, optionally followed by subtags separated by '-', specifying -# a country or region variant. -# -# Multiple values can be provided, and a weight can be added to each by -# using quality value syntax (;q=). '*' translates to any language. -# -# Defaults to "en". -# -# Example: -# -# url_preview_accept_language: -# - en-UK -# - en-US;q=0.9 -# - fr;q=0.8 -# - *;q=0.7 -# -url_preview_accept_language: -# - en - - -# oEmbed allows for easier embedding content from a website. 
It can be -# used for generating URLs previews of services which support it. -# -oembed: - # A default list of oEmbed providers is included with Synapse. - # - # Uncomment the following to disable using these default oEmbed URLs. - # Defaults to 'false'. - # - #disable_default_providers: true - - # Additional files with oEmbed configuration (each should be in the - # form of providers.json). - # - # By default, this list is empty (so only the default providers.json - # is used). - # - #additional_providers: - # - oembed/my_providers.json - - -## Captcha ## -# See docs/CAPTCHA_SETUP.md for full details of configuring this. - -# This homeserver's ReCAPTCHA public key. Must be specified if -# enable_registration_captcha is enabled. -# -#recaptcha_public_key: "YOUR_PUBLIC_KEY" - -# This homeserver's ReCAPTCHA private key. Must be specified if -# enable_registration_captcha is enabled. -# -#recaptcha_private_key: "YOUR_PRIVATE_KEY" - -# Uncomment to enable ReCaptcha checks when registering, preventing signup -# unless a captcha is answered. Requires a valid ReCaptcha -# public/private key. Defaults to 'false'. -# -#enable_registration_captcha: true - -# The API endpoint to use for verifying m.login.recaptcha responses. -# Defaults to "https://www.recaptcha.net/recaptcha/api/siteverify". -# -#recaptcha_siteverify_api: "https://my.recaptcha.site" - - -## TURN ## - -# The public URIs of the TURN server to give to clients -# -#turn_uris: [] - -# The shared secret used to compute passwords for the TURN server -# -#turn_shared_secret: "YOUR_SHARED_SECRET" - -# The Username and password if the TURN server needs them and -# does not use a token -# -#turn_username: "TURNSERVER_USERNAME" -#turn_password: "TURNSERVER_PASSWORD" - -# How long generated TURN credentials last -# -#turn_user_lifetime: 1h - -# Whether guests should be allowed to use the TURN server. -# This defaults to True, otherwise VoIP will be unreliable for guests. 
-# However, it does introduce a slight security risk as it allows users to -# connect to arbitrary endpoints without having first signed up for a -# valid account (e.g. by passing a CAPTCHA). -# -#turn_allow_guests: true - - -## Registration ## -# -# Registration can be rate-limited using the parameters in the "Ratelimiting" -# section of this file. - -# Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration, -# you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration -# without any verification, you must also set `enable_registration_without_verification`, found below. -# -#enable_registration: false - -# Enable registration without email or captcha verification. Note: this option is *not* recommended, -# as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect -# unless `enable_registration` is also enabled. -# -#enable_registration_without_verification: true - -# Time that a user's session remains valid for, after they log in. -# -# Note that this is not currently compatible with guest logins. -# -# Note also that this is calculated at login time: changes are not applied -# retrospectively to users who have already logged in. -# -# By default, this is infinite. -# -#session_lifetime: 24h - -# Time that an access token remains valid for, if the session is -# using refresh tokens. -# For more information about refresh tokens, please see the manual. -# Note that this only applies to clients which advertise support for -# refresh tokens. -# -# Note also that this is calculated at login time and refresh time: -# changes are not applied to existing sessions until they are refreshed. -# -# By default, this is 5 minutes. -# -#refreshable_access_token_lifetime: 5m - -# Time that a refresh token remains valid for (provided that it is not -# exchanged for another one first). 
-# This option can be used to automatically log-out inactive sessions. -# Please see the manual for more information. -# -# Note also that this is calculated at login time and refresh time: -# changes are not applied to existing sessions until they are refreshed. -# -# By default, this is infinite. -# -#refresh_token_lifetime: 24h - -# Time that an access token remains valid for, if the session is NOT -# using refresh tokens. -# Please note that not all clients support refresh tokens, so setting -# this to a short value may be inconvenient for some users who will -# then be logged out frequently. -# -# Note also that this is calculated at login time: changes are not applied -# retrospectively to existing sessions for users that have already logged in. -# -# By default, this is infinite. -# -#nonrefreshable_access_token_lifetime: 24h - -# The user must provide all of the below types of 3PID when registering. -# -#registrations_require_3pid: -# - email -# - msisdn - -# Explicitly disable asking for MSISDNs from the registration -# flow (overrides registrations_require_3pid if MSISDNs are set as required) -# -#disable_msisdn_registration: true - -# Mandate that users are only allowed to associate certain formats of -# 3PIDs with accounts on this server. -# -#allowed_local_3pids: -# - medium: email -# pattern: '^[^@]+@matrix\.org$' -# - medium: email -# pattern: '^[^@]+@vector\.im$' -# - medium: msisdn -# pattern: '\+44' - -# Enable 3PIDs lookup requests to identity servers from this server. -# -#enable_3pid_lookup: true - -# Require users to submit a token during registration. -# Tokens can be managed using the admin API: -# https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/registration_tokens.html -# Note that `enable_registration` must be set to `true`. -# Disabling this option will not delete any tokens previously generated. -# Defaults to false. 
Uncomment the following to require tokens: -# -#registration_requires_token: true - -# Allow users to submit a token during registration to bypass any required 3pid -# steps configured in `registrations_require_3pid`. -# Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow. -# -#enable_registration_token_3pid_bypass: false - -# If set, allows registration of standard or admin accounts by anyone who -# has the shared secret, even if registration is otherwise disabled. -# -#registration_shared_secret: <PRIVATE STRING> - -# Set the number of bcrypt rounds used to generate password hash. -# Larger numbers increase the work factor needed to generate the hash. -# The default number is 12 (which equates to 2^12 rounds). -# N.B. that increasing this will exponentially increase the time required -# to register or login - e.g. 24 => 2^24 rounds which will take >20 mins. -# -#bcrypt_rounds: 12 - -# Allows users to register as guests without a password/email/etc, and -# participate in rooms hosted on this server which have been made -# accessible to anonymous users. -# -#allow_guest_access: false - -# The identity server which we suggest that clients should use when users log -# in on this server. -# -# (By default, no suggestion is made, so it is left up to the client. -# This setting is ignored unless public_baseurl is also explicitly set.) -# -#default_identity_server: https://matrix.org - -# Handle threepid (email/phone etc) registration and password resets through a set of -# *trusted* identity servers. Note that this allows the configured identity server to -# reset passwords for accounts! -# -# Be aware that if `email` is not set, and SMTP options have not been -# configured in the email config block, registration and user password resets via -# email will be globally disabled. 
-# -# Additionally, if `msisdn` is not set, registration and password resets via msisdn -# will be disabled regardless, and users will not be able to associate an msisdn -# identifier to their account. This is due to Synapse currently not supporting -# any method of sending SMS messages on its own. -# -# To enable using an identity server for operations regarding a particular third-party -# identifier type, set the value to the URL of that identity server as shown in the -# examples below. -# -# Servers handling the these requests must answer the `/requestToken` endpoints defined -# by the Matrix Identity Service API specification: -# https://matrix.org/docs/spec/identity_service/latest -# -account_threepid_delegates: - #email: https://example.com # Delegate email sending to example.com - #msisdn: http://localhost:8090 # Delegate SMS sending to this local process - -# Whether users are allowed to change their displayname after it has -# been initially set. Useful when provisioning users based on the -# contents of a third-party directory. -# -# Does not apply to server administrators. Defaults to 'true' -# -#enable_set_displayname: false - -# Whether users are allowed to change their avatar after it has been -# initially set. Useful when provisioning users based on the contents -# of a third-party directory. -# -# Does not apply to server administrators. Defaults to 'true' -# -#enable_set_avatar_url: false - -# Whether users can change the 3PIDs associated with their accounts -# (email address and msisdn). -# -# Defaults to 'true' -# -#enable_3pid_changes: false - -# Users who register on this homeserver will automatically be joined -# to these rooms. -# -# By default, any room aliases included in this list will be created -# as a publicly joinable room when the first user registers for the -# homeserver. This behaviour can be customised with the settings below. -# If the room already exists, make certain it is a publicly joinable -# room. 
The join rule of the room must be set to 'public'. -# -#auto_join_rooms: -# - "#example:example.com" - -# Where auto_join_rooms are specified, setting this flag ensures that the -# the rooms exist by creating them when the first user on the -# homeserver registers. -# -# By default the auto-created rooms are publicly joinable from any federated -# server. Use the autocreate_auto_join_rooms_federated and -# autocreate_auto_join_room_preset settings below to customise this behaviour. -# -# Setting to false means that if the rooms are not manually created, -# users cannot be auto-joined since they do not exist. -# -# Defaults to true. Uncomment the following line to disable automatically -# creating auto-join rooms. -# -#autocreate_auto_join_rooms: false - -# Whether the auto_join_rooms that are auto-created are available via -# federation. Only has an effect if autocreate_auto_join_rooms is true. -# -# Note that whether a room is federated cannot be modified after -# creation. -# -# Defaults to true: the room will be joinable from other servers. -# Uncomment the following to prevent users from other homeservers from -# joining these rooms. -# -#autocreate_auto_join_rooms_federated: false - -# The room preset to use when auto-creating one of auto_join_rooms. Only has an -# effect if autocreate_auto_join_rooms is true. -# -# This can be one of "public_chat", "private_chat", or "trusted_private_chat". -# If a value of "private_chat" or "trusted_private_chat" is used then -# auto_join_mxid_localpart must also be configured. -# -# Defaults to "public_chat", meaning that the room is joinable by anyone, including -# federated servers if autocreate_auto_join_rooms_federated is true (the default). -# Uncomment the following to require an invitation to join these rooms. -# -#autocreate_auto_join_room_preset: private_chat - -# The local part of the user id which is used to create auto_join_rooms if -# autocreate_auto_join_rooms is true. 
If this is not provided then the -# initial user account that registers will be used to create the rooms. -# -# The user id is also used to invite new users to any auto-join rooms which -# are set to invite-only. -# -# It *must* be configured if autocreate_auto_join_room_preset is set to -# "private_chat" or "trusted_private_chat". -# -# Note that this must be specified in order for new users to be correctly -# invited to any auto-join rooms which have been set to invite-only (either -# at the time of creation or subsequently). -# -# Note that, if the room already exists, this user must be joined and -# have the appropriate permissions to invite new members. -# -#auto_join_mxid_localpart: system - -# When auto_join_rooms is specified, setting this flag to false prevents -# guest accounts from being automatically joined to the rooms. -# -# Defaults to true. -# -#auto_join_rooms_for_guests: false - -# Whether to inhibit errors raised when registering a new account if the user ID -# already exists. If turned on, requests to /register/available will always -# show a user ID as available, and Synapse won't raise an error when starting -# a registration with a user ID that already exists. However, Synapse will still -# raise an error if the registration completes and the username conflicts. -# -# Defaults to false. -# -#inhibit_user_in_use_error: true - - -## Metrics ## - -# Enable collection and rendering of performance metrics -# -#enable_metrics: false - -# Enable sentry integration -# NOTE: While attempts are made to ensure that the logs don't contain -# any sensitive information, this cannot be guaranteed. By enabling -# this option the sentry server may therefore receive sensitive -# information, and it in turn may then disseminate sensitive information -# through insecure notification channels if so configured. -# -#sentry: -# dsn: "..." 
- -# Flags to enable Prometheus metrics which are not suitable to be -# enabled by default, either for performance reasons or limited use. -# -metrics_flags: - # Publish synapse_federation_known_servers, a gauge of the number of - # servers this homeserver knows about, including itself. May cause - # performance problems on large homeservers. - # - #known_servers: true - -# Whether or not to report anonymized homeserver usage statistics. -# -#report_stats: true|false - -# The endpoint to report the anonymized homeserver usage statistics to. -# Defaults to https://matrix.org/report-usage-stats/push -# -#report_stats_endpoint: https://example.com/report-usage-stats/push - - -## API Configuration ## - -# Controls for the state that is shared with users who receive an invite -# to a room -# -room_prejoin_state: - # By default, the following state event types are shared with users who - # receive invites to the room: - # - # - m.room.join_rules - # - m.room.canonical_alias - # - m.room.avatar - # - m.room.encryption - # - m.room.name - # - m.room.create - # - m.room.topic - # - # Uncomment the following to disable these defaults (so that only the event - # types listed in 'additional_event_types' are shared). Defaults to 'false'. - # - #disable_default_event_types: true - - # Additional state event types to share with users when they are invited - # to a room. - # - # By default, this list is empty (so only the default event types are shared). - # - #additional_event_types: - # - org.example.custom.event.type - -# We record the IP address of clients used to access the API for various -# reasons, including displaying it to the user in the "Where you're signed in" -# dialog. -# -# By default, when puppeting another user via the admin API, the client IP -# address is recorded against the user who created the access token (ie, the -# admin user), and *not* the puppeted user. -# -# Uncomment the following to also record the IP address against the puppeted -# user. 
(This also means that the puppeted user will count as an "active" user -# for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc -# above.) -# -#track_puppeted_user_ips: true - - -# A list of application service config files to use -# -#app_service_config_files: -# - app_service_1.yaml -# - app_service_2.yaml - -# Uncomment to enable tracking of application service IP addresses. Implicitly -# enables MAU tracking for application service users. -# -#track_appservice_user_ips: true - - -# a secret which is used to sign access tokens. If none is specified, -# the registration_shared_secret is used, if one is given; otherwise, -# a secret key is derived from the signing key. -# -#macaroon_secret_key: <PRIVATE STRING> - -# a secret which is used to calculate HMACs for form values, to stop -# falsification of values. Must be specified for the User Consent -# forms to work. -# -#form_secret: <PRIVATE STRING> - -## Signing Keys ## - -# Path to the signing key to sign messages with -# +media_store_path: DATADIR/media_store signing_key_path: "CONFDIR/SERVERNAME.signing.key" - -# The keys that the server used to sign messages with but won't use -# to sign new messages. -# -old_signing_keys: - # For each key, `key` should be the base64-encoded public key, and - # `expired_ts`should be the time (in milliseconds since the unix epoch) that - # it was last used. - # - # It is possible to build an entry from an old signing.key file using the - # `export_signing_key` script which is provided with synapse. - # - # For example: - # - #"ed25519:id": { key: "base64string", expired_ts: 123456789123 } - -# How long key response published by this server is valid for. -# Used to set the valid_until_ts in /key/v2 APIs. -# Determines how quickly servers will query to check which keys -# are still valid. -# -#key_refresh_interval: 1d - -# The trusted servers to download signing keys from. -# -# When we need to fetch a signing key, each server is tried in parallel. 
-# -# Normally, the connection to the key server is validated via TLS certificates. -# Additional security can be provided by configuring a `verify key`, which -# will make synapse check that the response is signed by that key. -# -# This setting supersedes an older setting named `perspectives`. The old format -# is still supported for backwards-compatibility, but it is deprecated. -# -# 'trusted_key_servers' defaults to matrix.org, but using it will generate a -# warning on start-up. To suppress this warning, set -# 'suppress_key_server_warning' to true. -# -# Options for each entry in the list include: -# -# server_name: the name of the server. required. -# -# verify_keys: an optional map from key id to base64-encoded public key. -# If specified, we will check that the response is signed by at least -# one of the given keys. -# -# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, -# and federation_verify_certificates is not `true`, synapse will refuse -# to start, because this would allow anyone who can spoof DNS responses -# to masquerade as the trusted key server. If you know what you are doing -# and are sure that your network environment provides a secure connection -# to the key server, you can set this to `true` to override this -# behaviour. -# -# An example configuration might look like: -# -#trusted_key_servers: -# - server_name: "my_trusted_server.example.com" -# verify_keys: -# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" -# - server_name: "my_other_trusted_server.example.com" -# trusted_key_servers: - server_name: "matrix.org" - -# Uncomment the following to disable the warning that is emitted when the -# trusted_key_servers include 'matrix.org'. See above. -# -#suppress_key_server_warning: true - -# The signing keys to use when acting as a trusted key server. If not specified -# defaults to the server signing key. -# -# Can contain multiple keys, one per line. 
-# -#key_server_signing_keys_path: "key_server_signing_keys.key" - - -## Single sign-on integration ## - -# The following settings can be used to make Synapse use a single sign-on -# provider for authentication, instead of its internal password database. -# -# You will probably also want to set the following options to `false` to -# disable the regular login/registration flows: -# * enable_registration -# * password_config.enabled -# -# You will also want to investigate the settings under the "sso" configuration -# section below. - -# Enable SAML2 for registration and login. Uses pysaml2. -# -# At least one of `sp_config` or `config_path` must be set in this section to -# enable SAML login. -# -# Once SAML support is enabled, a metadata file will be exposed at -# https://<server>:<port>/_synapse/client/saml2/metadata.xml, which you may be able to -# use to configure your SAML IdP with. Alternatively, you can manually configure -# the IdP to use an ACS location of -# https://<server>:<port>/_synapse/client/saml2/authn_response. -# -saml2_config: - # `sp_config` is the configuration for the pysaml2 Service Provider. - # See pysaml2 docs for format of config. - # - # Default values will be used for the 'entityid' and 'service' settings, - # so it is not normally necessary to specify them unless you need to - # override them. - # - sp_config: - # Point this to the IdP's metadata. You must provide either a local - # file via the `local` attribute or (preferably) a URL via the - # `remote` attribute. - # - #metadata: - # local: ["saml2/idp.xml"] - # remote: - # - url: https://our_idp/metadata.xml - - # Allowed clock difference in seconds between the homeserver and IdP. - # - # Uncomment the below to increase the accepted time difference from 0 to 3 seconds. - # - #accepted_time_diff: 3 - - # By default, the user has to go to our login page first. 
If you'd like - # to allow IdP-initiated login, set 'allow_unsolicited: true' in a - # 'service.sp' section: - # - #service: - # sp: - # allow_unsolicited: true - - # The examples below are just used to generate our metadata xml, and you - # may well not need them, depending on your setup. Alternatively you - # may need a whole lot more detail - see the pysaml2 docs! - - #description: ["My awesome SP", "en"] - #name: ["Test SP", "en"] - - #ui_info: - # display_name: - # - lang: en - # text: "Display Name is the descriptive name of your service." - # description: - # - lang: en - # text: "Description should be a short paragraph explaining the purpose of the service." - # information_url: - # - lang: en - # text: "https://example.com/terms-of-service" - # privacy_statement_url: - # - lang: en - # text: "https://example.com/privacy-policy" - # keywords: - # - lang: en - # text: ["Matrix", "Element"] - # logo: - # - lang: en - # text: "https://example.com/logo.svg" - # width: "200" - # height: "80" - - #organization: - # name: Example com - # display_name: - # - ["Example co", "en"] - # url: "http://example.com" - - #contact_person: - # - given_name: Bob - # sur_name: "the Sysadmin" - # email_address": ["admin@example.com"] - # contact_type": technical - - # Instead of putting the config inline as above, you can specify a - # separate pysaml2 configuration file: - # - #config_path: "CONFDIR/sp_conf.py" - - # The lifetime of a SAML session. This defines how long a user has to - # complete the authentication process, if allow_unsolicited is unset. - # The default is 15 minutes. - # - #saml_session_lifetime: 5m - - # An external module can be provided here as a custom solution to - # mapping attributes returned from a saml provider onto a matrix user. - # - user_mapping_provider: - # The custom module's class. Uncomment to use a custom module. - # - #module: mapping_provider.SamlMappingProvider - - # Custom configuration values for the module. 
Below options are - # intended for the built-in provider, they should be changed if - # using a custom module. This section will be passed as a Python - # dictionary to the module's `parse_config` method. - # - config: - # The SAML attribute (after mapping via the attribute maps) to use - # to derive the Matrix ID from. 'uid' by default. - # - # Note: This used to be configured by the - # saml2_config.mxid_source_attribute option. If that is still - # defined, its value will be used instead. - # - #mxid_source_attribute: displayName - - # The mapping system to use for mapping the saml attribute onto a - # matrix ID. - # - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with - # '.'). - # The default is 'hexencode'. - # - # Note: This used to be configured by the - # saml2_config.mxid_mapping option. If that is still defined, its - # value will be used instead. - # - #mxid_mapping: dotreplace - - # In previous versions of synapse, the mapping from SAML attribute to - # MXID was always calculated dynamically rather than stored in a - # table. For backwards- compatibility, we will look for user_ids - # matching such a pattern before creating a new account. - # - # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if - # the attribute maps are changed, it may be necessary to change it. - # - # The default is 'uid'. - # - #grandfathered_mxid_source_attribute: upn - - # It is possible to configure Synapse to only allow logins if SAML attributes - # match particular values. The requirements can be listed under - # `attribute_requirements` as shown below. All of the listed attributes must - # match for the login to be permitted. 
- # - #attribute_requirements: - # - attribute: userGroup - # value: "staff" - # - attribute: department - # value: "sales" - - # If the metadata XML contains multiple IdP entities then the `idp_entityid` - # option must be set to the entity to redirect users to. - # - # Most deployments only have a single IdP entity and so should omit this - # option. - # - #idp_entityid: 'https://our_idp/entityid' - - -# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration -# and login. -# -# Options for each entry include: -# -# idp_id: a unique identifier for this identity provider. Used internally -# by Synapse; should be a single word such as 'github'. -# -# Note that, if this is changed, users authenticating via that provider -# will no longer be recognised as the same user! -# -# (Use "oidc" here if you are migrating from an old "oidc_config" -# configuration.) -# -# idp_name: A user-facing name for this identity provider, which is used to -# offer the user a choice of login mechanisms. -# -# idp_icon: An optional icon for this identity provider, which is presented -# by clients and Synapse's own IdP picker page. If given, must be an -# MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to -# obtain such an MXC URI is to upload an image to an (unencrypted) room -# and then copy the "url" from the source of the event.) -# -# idp_brand: An optional brand for this identity provider, allowing clients -# to style the login flow according to the identity provider in question. -# See the spec for possible options here. -# -# discover: set to 'false' to disable the use of the OIDC discovery mechanism -# to discover endpoints. Defaults to true. -# -# issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery -# is enabled) to discover the provider's endpoints. -# -# client_id: Required. oauth2 client id to use. -# -# client_secret: oauth2 client secret to use. 
May be omitted if -# client_secret_jwt_key is given, or if client_auth_method is 'none'. -# -# client_secret_jwt_key: Alternative to client_secret: details of a key used -# to create a JSON Web Token to be used as an OAuth2 client secret. If -# given, must be a dictionary with the following properties: -# -# key: a pem-encoded signing key. Must be a suitable key for the -# algorithm specified. Required unless 'key_file' is given. -# -# key_file: the path to file containing a pem-encoded signing key file. -# Required unless 'key' is given. -# -# jwt_header: a dictionary giving properties to include in the JWT -# header. Must include the key 'alg', giving the algorithm used to -# sign the JWT, such as "ES256", using the JWA identifiers in -# RFC7518. -# -# jwt_payload: an optional dictionary giving properties to include in -# the JWT payload. Normally this should include an 'iss' key. -# -# client_auth_method: auth method to use when exchanging the token. Valid -# values are 'client_secret_basic' (default), 'client_secret_post' and -# 'none'. -# -# scopes: list of scopes to request. This should normally include the "openid" -# scope. Defaults to ["openid"]. -# -# authorization_endpoint: the oauth2 authorization endpoint. Required if -# provider discovery is disabled. -# -# token_endpoint: the oauth2 token endpoint. Required if provider discovery is -# disabled. -# -# userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is -# disabled and the 'openid' scope is not requested. -# -# jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and -# the 'openid' scope is used. -# -# skip_verification: set to 'true' to skip metadata verification. Use this if -# you are connecting to a provider that is not OpenID Connect compliant. -# Defaults to false. Avoid this in production. -# -# user_profile_method: Whether to fetch the user profile from the userinfo -# endpoint, or to rely on the data returned in the id_token from the -# token_endpoint. 
-# -# Valid values are: 'auto' or 'userinfo_endpoint'. -# -# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is -# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the -# userinfo endpoint. -# -# allow_existing_users: set to 'true' to allow a user logging in via OIDC to -# match a pre-existing account instead of failing. This could be used if -# switching from password logins to OIDC. Defaults to false. -# -# user_mapping_provider: Configuration for how attributes returned from a OIDC -# provider are mapped onto a matrix user. This setting has the following -# sub-properties: -# -# module: The class name of a custom mapping module. Default is -# 'synapse.handlers.oidc.JinjaOidcMappingProvider'. -# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers -# for information on implementing a custom mapping provider. -# -# config: Configuration for the mapping provider module. This section will -# be passed as a Python dictionary to the user mapping provider -# module's `parse_config` method. -# -# For the default provider, the following settings are available: -# -# subject_claim: name of the claim containing a unique identifier -# for the user. Defaults to 'sub', which OpenID Connect -# compliant providers should provide. -# -# localpart_template: Jinja2 template for the localpart of the MXID. -# If this is not set, the user will be prompted to choose their -# own username (see the documentation for the -# 'sso_auth_account_details.html' template). This template can -# use the 'localpart_from_email' filter. -# -# confirm_localpart: Whether to prompt the user to validate (or -# change) the generated localpart (see the documentation for the -# 'sso_auth_account_details.html' template), instead of -# registering the account right away. -# -# display_name_template: Jinja2 template for the display name to set -# on first login. If unset, no displayname will be set. 
-# -# email_template: Jinja2 template for the email address of the user. -# If unset, no email address will be added to the account. -# -# extra_attributes: a map of Jinja2 templates for extra attributes -# to send back to the client during login. -# Note that these are non-standard and clients will ignore them -# without modifications. -# -# When rendering, the Jinja2 templates are given a 'user' variable, -# which is set to the claims returned by the UserInfo Endpoint and/or -# in the ID Token. -# -# It is possible to configure Synapse to only allow logins if certain attributes -# match particular values in the OIDC userinfo. The requirements can be listed under -# `attribute_requirements` as shown below. All of the listed attributes must -# match for the login to be permitted. Additional attributes can be added to -# userinfo by expanding the `scopes` section of the OIDC config to retrieve -# additional information from the OIDC provider. -# -# If the OIDC claim is a list, then the attribute must match any value in the list. -# Otherwise, it must exactly match the value of the claim. Using the example -# below, the `family_name` claim MUST be "Stephensson", but the `groups` -# claim MUST contain "admin". -# -# attribute_requirements: -# - attribute: family_name -# value: "Stephensson" -# - attribute: groups -# value: "admin" -# -# See https://matrix-org.github.io/synapse/latest/openid.html -# for information on how to configure these options. -# -# For backwards compatibility, it is also possible to configure a single OIDC -# provider via an 'oidc_config' setting. This is now deprecated and admins are -# advised to migrate to the 'oidc_providers' format. (When doing that migration, -# use 'oidc' for the idp_id to ensure that existing users continue to be -# recognised.) 
-# -oidc_providers: - # Generic example - # - #- idp_id: my_idp - # idp_name: "My OpenID provider" - # idp_icon: "mxc://example.com/mediaid" - # discover: false - # issuer: "https://accounts.example.com/" - # client_id: "provided-by-your-issuer" - # client_secret: "provided-by-your-issuer" - # client_auth_method: client_secret_post - # scopes: ["openid", "profile"] - # authorization_endpoint: "https://accounts.example.com/oauth2/auth" - # token_endpoint: "https://accounts.example.com/oauth2/token" - # userinfo_endpoint: "https://accounts.example.com/userinfo" - # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" - # skip_verification: true - # user_mapping_provider: - # config: - # subject_claim: "id" - # localpart_template: "{{ user.login }}" - # display_name_template: "{{ user.name }}" - # email_template: "{{ user.email }}" - # attribute_requirements: - # - attribute: userGroup - # value: "synapseUsers" - - -# Enable Central Authentication Service (CAS) for registration and login. -# -cas_config: - # Uncomment the following to enable authorization against a CAS server. - # Defaults to false. - # - #enabled: true - - # The URL of the CAS authorization endpoint. - # - #server_url: "https://cas-server.com" - - # The attribute of the CAS response to use as the display name. - # - # If unset, no displayname will be set. - # - #displayname_attribute: name - - # It is possible to configure Synapse to only allow logins if CAS attributes - # match particular values. All of the keys in the mapping below must exist - # and the values must match the given value. Alternately if the given value - # is None then any value is allowed (the attribute just must exist). - # All of the listed attributes must match for the login to be permitted. - # - #required_attributes: - # userGroup: "staff" - # department: None - - -# Additional settings to use with single-sign on systems such as OpenID Connect, -# SAML2 and CAS. 
-# -# Server admins can configure custom templates for pages related to SSO. See -# https://matrix-org.github.io/synapse/latest/templates.html for more information. -# -sso: - # A list of client URLs which are whitelisted so that the user does not - # have to confirm giving access to their account to the URL. Any client - # whose URL starts with an entry in the following list will not be subject - # to an additional confirmation step after the SSO login is completed. - # - # WARNING: An entry such as "https://my.client" is insecure, because it - # will also match "https://my.client.evil.site", exposing your users to - # phishing attacks from evil.site. To avoid this, include a slash after the - # hostname: "https://my.client/". - # - # The login fallback page (used by clients that don't natively support the - # required login flows) is whitelisted in addition to any URLs in this list. - # - # By default, this list contains only the login fallback page. - # - #client_whitelist: - # - https://riot.im/develop - # - https://my.custom.client/ - - # Uncomment to keep a user's profile fields in sync with information from - # the identity provider. Currently only syncing the displayname is - # supported. Fields are checked on every SSO login, and are updated - # if necessary. - # - # Note that enabling this option will override user profile information, - # regardless of whether users have opted-out of syncing that - # information when first signing in. Defaults to false. - # - #update_profile_information: true - - -# JSON web token integration. The following settings can be used to make -# Synapse JSON web tokens for authentication, instead of its internal -# password database. -# -# Each JSON Web Token needs to contain a "sub" (subject) claim, which is -# used as the localpart of the mxid. -# -# Additionally, the expiration time ("exp"), not before time ("nbf"), -# and issued at ("iat") claims are validated if present. 
-# -# Note that this is a non-standard login type and client support is -# expected to be non-existent. -# -# See https://matrix-org.github.io/synapse/latest/jwt.html. -# -#jwt_config: - # Uncomment the following to enable authorization using JSON web - # tokens. Defaults to false. - # - #enabled: true - - # This is either the private shared secret or the public key used to - # decode the contents of the JSON web token. - # - # Required if 'enabled' is true. - # - #secret: "provided-by-your-issuer" - - # The algorithm used to sign the JSON web token. - # - # Supported algorithms are listed at - # https://pyjwt.readthedocs.io/en/latest/algorithms.html - # - # Required if 'enabled' is true. - # - #algorithm: "provided-by-your-issuer" - - # Name of the claim containing a unique identifier for the user. - # - # Optional, defaults to `sub`. - # - #subject_claim: "sub" - - # The issuer to validate the "iss" claim against. - # - # Optional, if provided the "iss" claim will be required and - # validated for all JSON web tokens. - # - #issuer: "provided-by-your-issuer" - - # A list of audiences to validate the "aud" claim against. - # - # Optional, if provided the "aud" claim will be required and - # validated for all JSON web tokens. - # - # Note that if the "aud" claim is included in a JSON web token then - # validation will fail without configuring audiences. - # - #audiences: - # - "provided-by-your-issuer" - - -password_config: - # Uncomment to disable password login. - # Set to `only_for_reauth` to permit reauthentication for users that - # have passwords and are already logged in. - # - #enabled: false - - # Uncomment to disable authentication against the local password - # database. This is ignored if `enabled` is false, and is only useful - # if you have other password_providers. - # - #localdb_enabled: false - - # Uncomment and change to a secret random string for extra security. - # DO NOT CHANGE THIS AFTER INITIAL SETUP! 
- # - #pepper: "EVEN_MORE_SECRET" - - # Define and enforce a password policy. Each parameter is optional. - # This is an implementation of MSC2000. - # - policy: - # Whether to enforce the password policy. - # Defaults to 'false'. - # - #enabled: true - - # Minimum accepted length for a password. - # Defaults to 0. - # - #minimum_length: 15 - - # Whether a password must contain at least one digit. - # Defaults to 'false'. - # - #require_digit: true - - # Whether a password must contain at least one symbol. - # A symbol is any character that's not a number or a letter. - # Defaults to 'false'. - # - #require_symbol: true - - # Whether a password must contain at least one lowercase letter. - # Defaults to 'false'. - # - #require_lowercase: true - - # Whether a password must contain at least one uppercase letter. - # Defaults to 'false'. - # - #require_uppercase: true - -ui_auth: - # The amount of time to allow a user-interactive authentication session - # to be active. - # - # This defaults to 0, meaning the user is queried for their credentials - # before every action, but this can be overridden to allow a single - # validation to be re-used. This weakens the protections afforded by - # the user-interactive authentication process, by allowing for multiple - # (and potentially different) operations to use the same validation session. - # - # This is ignored for potentially "dangerous" operations (including - # deactivating an account, modifying an account password, and - # adding a 3PID). - # - # Uncomment below to allow for credential validation to last for 15 - # seconds. - # - #session_timeout: "15s" - - -# Configuration for sending emails from Synapse. -# -# Server admins can configure custom templates for email content. See -# https://matrix-org.github.io/synapse/latest/templates.html for more information. -# -email: - # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. 
- # - #smtp_host: mail.server - - # The port on the mail server for outgoing SMTP. Defaults to 25. - # - #smtp_port: 587 - - # Username/password for authentication to the SMTP server. By default, no - # authentication is attempted. - # - #smtp_user: "exampleusername" - #smtp_pass: "examplepassword" - - # Uncomment the following to require TLS transport security for SMTP. - # By default, Synapse will connect over plain text, and will then switch to - # TLS via STARTTLS *if the SMTP server supports it*. If this option is set, - # Synapse will refuse to connect unless the server supports STARTTLS. - # - #require_transport_security: true - - # Uncomment the following to disable TLS for SMTP. - # - # By default, if the server supports TLS, it will be used, and the server - # must present a certificate that is valid for 'smtp_host'. If this option - # is set to false, TLS will not be used. - # - #enable_tls: false - - # notif_from defines the "From" address to use when sending emails. - # It must be set if email sending is enabled. - # - # The placeholder '%(app)s' will be replaced by the application name, - # which is normally 'app_name' (below), but may be overridden by the - # Matrix client application. - # - # Note that the placeholder must be written '%(app)s', including the - # trailing 's'. - # - #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>" - - # app_name defines the default value for '%(app)s' in notif_from and email - # subjects. It defaults to 'Matrix'. - # - #app_name: my_branded_matrix_server - - # Uncomment the following to enable sending emails for messages that the user - # has missed. Disabled by default. - # - #enable_notifs: true - - # Uncomment the following to disable automatic subscription to email - # notifications for new users. Enabled by default. - # - #notif_for_new_users: false - - # Custom URL for client links within the email notifications. By default - # links will be based on "https://matrix.to". 
- # - # (This setting used to be called riot_base_url; the old name is still - # supported for backwards-compatibility but is now deprecated.) - # - #client_base_url: "http://localhost/riot" - - # Configure the time that a validation email will expire after sending. - # Defaults to 1h. - # - #validation_token_lifetime: 15m - - # The web client location to direct users to during an invite. This is passed - # to the identity server as the org.matrix.web_client_location key. Defaults - # to unset, giving no guidance to the identity server. - # - #invite_client_location: https://app.element.io - - # Subjects to use when sending emails from Synapse. - # - # The placeholder '%(app)s' will be replaced with the value of the 'app_name' - # setting above, or by a value dictated by the Matrix client application. - # - # If a subject isn't overridden in this configuration file, the value used as - # its example will be used. - # - #subjects: - - # Subjects for notification emails. - # - # On top of the '%(app)s' placeholder, these can use the following - # placeholders: - # - # * '%(person)s', which will be replaced by the display name of the user(s) - # that sent the message(s), e.g. "Alice and Bob". - # * '%(room)s', which will be replaced by the name of the room the - # message(s) have been sent to, e.g. "My super room". - # - # See the example provided for each setting to see which placeholder can be - # used and how to use them. - # - # Subject to use to notify about one message from one or more user(s) in a - # room which has a name. - #message_from_person_in_room: "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..." - # - # Subject to use to notify about one message from one or more user(s) in a - # room which doesn't have a name. - #message_from_person: "[%(app)s] You have a message on %(app)s from %(person)s..." - # - # Subject to use to notify about multiple messages from one or more users in - # a room which doesn't have a name. 
- #messages_from_person: "[%(app)s] You have messages on %(app)s from %(person)s..." - # - # Subject to use to notify about multiple messages in a room which has a - # name. - #messages_in_room: "[%(app)s] You have messages on %(app)s in the %(room)s room..." - # - # Subject to use to notify about multiple messages in multiple rooms. - #messages_in_room_and_others: "[%(app)s] You have messages on %(app)s in the %(room)s room and others..." - # - # Subject to use to notify about multiple messages from multiple persons in - # multiple rooms. This is similar to the setting above except it's used when - # the room in which the notification was triggered has no name. - #messages_from_person_and_others: "[%(app)s] You have messages on %(app)s from %(person)s and others..." - # - # Subject to use to notify about an invite to a room which has a name. - #invite_from_person_to_room: "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..." - # - # Subject to use to notify about an invite to a room which doesn't have a - # name. - #invite_from_person: "[%(app)s] %(person)s has invited you to chat on %(app)s..." - - # Subject for emails related to account administration. - # - # On top of the '%(app)s' placeholder, these one can use the - # '%(server_name)s' placeholder, which will be replaced by the value of the - # 'server_name' setting in your Synapse configuration. - # - # Subject to use when sending a password reset email. - #password_reset: "[%(server_name)s] Password reset" - # - # Subject to use when sending a verification email to assert an address's - # ownership. - #email_validation: "[%(server_name)s] Validate your email" - - - -## Push ## - -push: - # Clients requesting push notifications can either have the body of - # the message sent in the notification poke along with other details - # like the sender, or just the event ID and room ID (`event_id_only`). 
- # If clients choose the former, this option controls whether the - # notification request includes the content of the event (other details - # like the sender are still included). For `event_id_only` push, it - # has no effect. - # - # For modern android devices the notification content will still appear - # because it is loaded by the app. iPhone, however will send a - # notification saying only that a message arrived and who it came from. - # - # The default value is "true" to include message details. Uncomment to only - # include the event ID and room ID in push notification payloads. - # - #include_content: false - - # When a push notification is received, an unread count is also sent. - # This number can either be calculated as the number of unread messages - # for the user, or the number of *rooms* the user has unread messages in. - # - # The default value is "true", meaning push clients will see the number of - # rooms with unread messages in them. Uncomment to instead send the number - # of unread messages. - # - #group_unread_count_by_room: false - - -## Rooms ## - -# Controls whether locally-created rooms should be end-to-end encrypted by -# default. -# -# Possible options are "all", "invite", and "off". They are defined as: -# -# * "all": any locally-created room -# * "invite": any room created with the "private_chat" or "trusted_private_chat" -# room creation presets -# * "off": this option will take no effect -# -# The default value is "off". -# -# Note that this option will only affect rooms created after it is set. It -# will also not affect rooms created by other servers. -# -#encryption_enabled_by_default_for_room_type: invite - -# Override the default power levels for rooms created on this server, per -# room creation preset. -# -# The appropriate dictionary for the room preset will be applied on top -# of the existing power levels content. -# -# Useful if you know that your users need special permissions in rooms -# that they create (e.g. 
to send particular types of state events without -# needing an elevated power level). This takes the same shape as the -# `power_level_content_override` parameter in the /createRoom API, but -# is applied before that parameter. -# -# Valid keys are some or all of `private_chat`, `trusted_private_chat` -# and `public_chat`. Inside each of those should be any of the -# properties allowed in `power_level_content_override` in the -# /createRoom API. If any property is missing, its default value will -# continue to be used. If any property is present, it will overwrite -# the existing default completely (so if the `events` property exists, -# the default event power levels will be ignored). -# -#default_power_level_content_override: -# private_chat: -# "events": -# "com.example.myeventtype" : 0 -# "m.room.avatar": 50 -# "m.room.canonical_alias": 50 -# "m.room.encryption": 100 -# "m.room.history_visibility": 100 -# "m.room.name": 50 -# "m.room.power_levels": 100 -# "m.room.server_acl": 100 -# "m.room.tombstone": 100 -# "events_default": 1 - - - -# User Directory configuration -# -user_directory: - # Defines whether users can search the user directory. If false then - # empty responses are returned to all queries. Defaults to true. - # - # Uncomment to disable the user directory. - # - #enabled: false - - # Defines whether to search all users visible to your HS when searching - # the user directory. If false, search results will only contain users - # visible in public rooms and users sharing a room with the requester. - # Defaults to false. - # - # NB. If you set this to true, and the last time the user_directory search - # indexes were (re)built was before Synapse 1.44, you'll have to - # rebuild the indexes in order to search through all known users. 
- # These indexes are built the first time Synapse starts; admins can - # manually trigger a rebuild via API following the instructions at - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run - # - # Uncomment to return search results containing all known users, even if that - # user does not share a room with the requester. - # - #search_all_users: true - - # Defines whether to prefer local users in search query results. - # If True, local users are more likely to appear above remote users - # when searching the user directory. Defaults to false. - # - # Uncomment to prefer local over remote users in user directory search - # results. - # - #prefer_local_users: true - - -# User Consent configuration -# -# for detailed instructions, see -# https://matrix-org.github.io/synapse/latest/consent_tracking.html -# -# Parts of this section are required if enabling the 'consent' resource under -# 'listeners', in particular 'template_dir' and 'version'. -# -# 'template_dir' gives the location of the templates for the HTML forms. -# This directory should contain one subdirectory per language (eg, 'en', 'fr'), -# and each language directory should contain the policy document (named as -# '<version>.html') and a success page (success.html). -# -# 'version' specifies the 'current' version of the policy document. It defines -# the version to be served by the consent resource if there is no 'v' -# parameter. -# -# 'server_notice_content', if enabled, will send a user a "Server Notice" -# asking them to consent to the privacy policy. The 'server_notices' section -# must also be configured for this to work. Notices will *not* be sent to -# guest users unless 'send_server_notice_to_guests' is set to true. -# -# 'block_events_error', if set, will block any attempts to send events -# until the user consents to the privacy policy. The value of the setting is -# used as the text of the error. 
-# -# 'require_at_registration', if enabled, will add a step to the registration -# process, similar to how captcha works. Users will be required to accept the -# policy before their account is created. -# -# 'policy_name' is the display name of the policy users will see when registering -# for an account. Has no effect unless `require_at_registration` is enabled. -# Defaults to "Privacy Policy". -# -#user_consent: -# template_dir: res/templates/privacy -# version: 1.0 -# server_notice_content: -# msgtype: m.text -# body: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# send_server_notice_to_guests: true -# block_events_error: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# require_at_registration: false -# policy_name: Privacy Policy -# - - - -# Settings for local room and user statistics collection. See -# https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html. -# -stats: - # Uncomment the following to disable room and user statistics. Note that doing - # so may cause certain features (such as the room directory) not to work - # correctly. - # - #enabled: false - - -# Server Notices room configuration -# -# Uncomment this section to enable a room which can be used to send notices -# from the server to users. It is a special room which cannot be left; notices -# come from a special "notices" user id. -# -# If you uncomment this section, you *must* define the system_mxid_localpart -# setting, which defines the id of the user which will be used to send the -# notices. -# -# It's also possible to override the room name, the display name of the -# "notices" user, and the avatar for the user. 
-# -#server_notices: -# system_mxid_localpart: notices -# system_mxid_display_name: "Server Notices" -# system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ" -# room_name: "Server Notices" - - - -# Uncomment to disable searching the public room list. When disabled -# blocks searching local and remote room lists for local and remote -# users by always returning an empty list for all queries. -# -#enable_room_list_search: false - -# The `alias_creation` option controls who's allowed to create aliases -# on this server. -# -# The format of this option is a list of rules that contain globs that -# match against user_id, room_id and the new alias (fully qualified with -# server name). The action in the first rule that matches is taken, -# which can currently either be "allow" or "deny". -# -# Missing user_id/room_id/alias fields default to "*". -# -# If no rules match the request is denied. An empty list means no one -# can create aliases. -# -# Options for the rules include: -# -# user_id: Matches against the creator of the alias -# alias: Matches against the alias being created -# room_id: Matches against the room ID the alias is being pointed at -# action: Whether to "allow" or "deny" the request if the rule matches -# -# The default is: -# -#alias_creation_rules: -# - user_id: "*" -# alias: "*" -# room_id: "*" -# action: allow - -# The `room_list_publication_rules` option controls who can publish and -# which rooms can be published in the public room list. -# -# The format of this option is the same as that for -# `alias_creation_rules`. -# -# If the room has one or more aliases associated with it, only one of -# the aliases needs to match the alias rule. If there are no aliases -# then only rules with `alias: *` match. -# -# If no rules match the request is denied. An empty list means no one -# can publish rooms. 
-# -# Options for the rules include: -# -# user_id: Matches against the creator of the alias -# room_id: Matches against the room ID being published -# alias: Matches against any current local or canonical aliases -# associated with the room -# action: Whether to "allow" or "deny" the request if the rule matches -# -# The default is: -# -#room_list_publication_rules: -# - user_id: "*" -# alias: "*" -# room_id: "*" -# action: allow - - -## Opentracing ## - -# These settings enable opentracing, which implements distributed tracing. -# This allows you to observe the causal chains of events across servers -# including requests, key lookups etc., across any server running -# synapse or any other other services which supports opentracing -# (specifically those implemented with Jaeger). -# -opentracing: - # tracing is disabled by default. Uncomment the following line to enable it. - # - #enabled: true - - # The list of homeservers we wish to send and receive span contexts and span baggage. - # See https://matrix-org.github.io/synapse/latest/opentracing.html. - # - # This is a list of regexes which are matched against the server_name of the - # homeserver. - # - # By default, it is empty, so no servers are matched. - # - #homeserver_whitelist: - # - ".*" - - # A list of the matrix IDs of users whose requests will always be traced, - # even if the tracing system would otherwise drop the traces due to - # probabilistic sampling. - # - # By default, the list is empty. - # - #force_tracing_for_users: - # - "@user1:server_name" - # - "@user2:server_name" - - # Jaeger can be configured to sample traces at different rates. - # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration is mostly related to trace sampling which - # is documented here: - # https://www.jaegertracing.io/docs/latest/sampling/. 
- # - #jaeger_config: - # sampler: - # type: const - # param: 1 - # logging: - # false - - -## Workers ## - -# Disables sending of outbound federation transactions on the main process. -# Uncomment if using a federation sender worker. -# -#send_federation: false - -# It is possible to run multiple federation sender workers, in which case the -# work is balanced across them. -# -# This configuration must be shared between all federation sender workers, and if -# changed all federation sender workers must be stopped at the same time and then -# started, to ensure that all instances are running with the same config (otherwise -# events may be dropped). -# -#federation_sender_instances: -# - federation_sender1 - -# When using workers this should be a map from `worker_name` to the -# HTTP replication listener of the worker, if configured. -# -#instance_map: -# worker1: -# host: localhost -# port: 8034 - -# Experimental: When using workers you can define which workers should -# handle event persistence and typing notifications. Any worker -# specified here must also be in the `instance_map`. -# -#stream_writers: -# events: worker1 -# typing: worker1 - -# The worker that is used to run background tasks (e.g. cleaning up expired -# data). If not provided this defaults to the main process. -# -#run_background_tasks_on: worker1 - -# A shared secret used by the replication APIs to authenticate HTTP requests -# from workers. -# -# By default this is unused and traffic is not authenticated. -# -#worker_replication_secret: "" - - -# Configuration for Redis when using workers. This *must* be enabled when -# using workers (unless using old style direct TCP configuration). -# -redis: - # Uncomment the below to enable Redis support. - # - #enabled: true - - # Optional host and port to use to connect to redis. 
Defaults to - # localhost and 6379 - # - #host: localhost - #port: 6379 - - # Optional password if configured on the Redis instance - # - #password: <secret_password> - - -## Background Updates ## - -# Background updates are database updates that are run in the background in batches. -# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to -# sleep can all be configured. This is helpful to speed up or slow down the updates. -# -background_updates: - # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set - # a time to change the default. - # - #background_update_duration_ms: 500 - - # Whether to sleep between updates. Defaults to True. Uncomment to change the default. - # - #sleep_enabled: false - - # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment - # and set a duration to change the default. - # - #sleep_duration_ms: 300 - - # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and - # set a size to change the default. - # - #min_batch_size: 10 - - # The batch size to use for the first iteration of a new background update. The default is 100. - # Uncomment and set a size to change the default. 
- # - #default_batch_size: 50 diff --git a/docs/upgrade.md b/docs/upgrade.md index 5ac29abb08..312f0b87fe 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,47 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.62.0 + +## New signatures for spam checker callbacks + +As a followup to changes in v1.60.0, the following spam-checker callbacks have changed signature: + +- `user_may_join_room` +- `user_may_invite` +- `user_may_send_3pid_invite` +- `user_may_create_room` +- `user_may_create_room_alias` +- `user_may_publish_room` +- `check_media_file_for_spam` + +For each of these methods, the previous callback signature has been deprecated. + +Whereas callbacks used to return `bool`, they should now return `Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]`. + +For instance, if your module implements `user_may_join_room` as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool) + if ...: + # Request is spam + return False + # Request is not spam + return True +``` + +you should rewrite it as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool) + if ...: + # Request is spam, mark it as forbidden (you may use some more precise error + # code if it is useful). + return synapse.module_api.errors.Codes.FORBIDDEN + # Request is not spam, mark it as such. 
+ return synapse.module_api.NOT_SPAM +``` + # Upgrading to v1.61.0 ## Removal of deprecated community/groups diff --git a/mypy.ini b/mypy.ini index fe3e3f9b8e..7973f2ac01 100644 --- a/mypy.ini +++ b/mypy.ini @@ -56,7 +56,6 @@ exclude = (?x) |tests/rest/media/v1/test_media_storage.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py - |tests/state/test_v2.py |tests/test_metrics.py |tests/test_server.py |tests/test_state.py @@ -115,6 +114,9 @@ disallow_untyped_defs = False [mypy-tests.handlers.test_user_directory] disallow_untyped_defs = True +[mypy-tests.state.test_profile] +disallow_untyped_defs = True + [mypy-tests.storage.test_profile] disallow_untyped_defs = True diff --git a/poetry.lock b/poetry.lock index 7c561e3182..8a54a939fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -524,7 +524,7 @@ python-versions = ">=3.7" [[package]] name = "matrix-common" -version = "1.1.0" +version = "1.2.1" description = "Common utilities for Synapse, Sydent and Sygnal" category = "main" optional = false @@ -535,7 +535,7 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==21.9b0)", "flake8 (==4.0.1)", "isort (==5.9.3)"] +dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"] test = ["tox", "twisted", "aiounittest"] [[package]] @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "539e5326f401472d1ffc8325d53d72e544cd70156b3f43f32f1285c4c131f831" +content-hash = "c1bb4dabba1e87517e25ca7bf778e8082fbc960a51d83819aec3a154110a374f" [metadata.files] attrs = [ @@ -2042,8 +2042,8 @@ markupsafe = [ {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, ] matrix-common = [ - {file = 
"matrix_common-1.1.0-py3-none-any.whl", hash = "sha256:5d6dfd777503b2f3a031b566e6af25b6e95f9c0818ef57d954c3190fce5eb407"}, - {file = "matrix_common-1.1.0.tar.gz", hash = "sha256:a8238748afc2b37079818367fed5156f355771b07c8ff0a175934f47e0ff3276"}, + {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"}, + {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"}, ] matrix-synapse-ldap3 = [ {file = "matrix-synapse-ldap3-0.2.0.tar.gz", hash = "sha256:91a0715b43a41ec3033244174fca20846836da98fda711fb01687f7199eecd2e"}, diff --git a/pyproject.toml b/pyproject.toml index 8b21bdc837..3c64e248ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1" cryptography = ">=3.4.7" # ijson 3.1.4 fixes a bug with "." in property names ijson = ">=3.1.4" -matrix-common = "~=1.1.0" +matrix-common = "~=1.2.1" # We need packaging.requirements.Requirement, added in 16.1. packaging = ">=16.1" # At the time of writing, we only use functions from the version `importlib.metadata` diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3c472c576e..52ef1fd07f 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -18,7 +18,7 @@ # argument to the script. Complement will then only run those tests. If # no regex is supplied, all tests are run. For example; # -# ./complement.sh "TestOutboundFederation(Profile|Send)" +# ./complement.sh -run "TestOutboundFederation(Profile|Send)" # # Exit if a line returns a non-zero exit code @@ -43,17 +43,32 @@ fi # Build the base Synapse image from the local checkout docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . +# Build the workers docker image (from the base Synapse image we just built). +docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . 
+ +# Build the unified Complement image (from the worker Synapse image we just built). +docker build -t complement-synapse \ + -f "docker/complement/Dockerfile" "docker/complement" + +export COMPLEMENT_BASE_IMAGE=complement-synapse + extra_test_args=() test_tags="synapse_blacklist,msc2716,msc3030,msc3787" -# If we're using workers, modify the docker files slightly. +# All environment variables starting with PASS_ will be shared. +# (The prefix is stripped off before reaching the container.) +export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ + +# It takes longer than 10m to run the whole suite. +extra_test_args+=("-timeout=60m") + if [[ -n "$WORKERS" ]]; then - # Build the workers docker image (from the base Synapse image). - docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + # Use workers. + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true - export COMPLEMENT_BASE_IMAGE=complement-synapse-workers - COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile + # Workers can only use Postgres as a database. + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres # And provide some more configuration to complement. @@ -61,21 +76,19 @@ if [[ -n "$WORKERS" ]]; then # time (the main problem is that we start 14 python processes for each test, # and complement likes to do two of them in parallel). export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 - - # ... and it takes longer than 10m to run the whole suite. - extra_test_args+=("-timeout=60m") else - export COMPLEMENT_BASE_IMAGE=complement-synapse - COMPLEMENT_DOCKERFILE=Dockerfile + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= + if [[ -n "$POSTGRES" ]]; then + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + else + export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + fi # We only test faster room joins on monoliths, because they are purposefully # being developed without worker support to start with. test_tags="$test_tags,faster_joins" fi -# Build the Complement image from the Synapse image we just built. 
-docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement" - # Run the tests! echo "Images built; running complement" cd "$COMPLEMENT_DIR" diff --git a/synapse/__init__.py b/synapse/__init__.py index 1613941759..b1369aca8f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -20,8 +20,6 @@ import json import os import sys -from matrix_common.versionstring import get_distribution_version_string - # Check that we're not running on an unsupported Python version. if sys.version_info < (3, 7): print("Synapse requires Python 3.7 or above.") @@ -70,7 +68,9 @@ try: except ImportError: pass -__version__ = get_distribution_version_string("matrix-synapse") +import synapse.util + +__version__ = synapse.util.SYNAPSE_VERSION if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 361b51d2fa..9586086c03 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -40,7 +40,6 @@ from typing import ( ) import yaml -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import TypedDict from twisted.internet import defer, reactor as reactor_ @@ -62,7 +61,6 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, ) -from synapse.storage.databases.main.group_server import GroupServerStore from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) @@ -84,7 +82,7 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.types import ISynapseReactor -from synapse.util import Clock 
+from synapse.util import SYNAPSE_VERSION, Clock # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. @@ -219,7 +217,6 @@ class Store( PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, - GroupServerStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) @@ -258,9 +255,7 @@ class MockHomeserver: self.clock = Clock(reactor) self.config = config self.hostname = config.server.server_name - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) + self.version_string = SYNAPSE_VERSION def get_clock(self) -> Clock: return self.clock diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index c443522c05..b4aeae6dd5 100755 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -19,7 +19,6 @@ import sys from typing import cast import yaml -from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, reactor as reactor_ @@ -28,6 +27,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor +from synapse.util import SYNAPSE_VERSION # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. 
@@ -43,8 +43,7 @@ class MockHomeserver(HomeServer): hostname=config.server.server_name, config=config, reactor=reactor, - version_string="Synapse/" - + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{SYNAPSE_VERSION}", ) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5a410f805a..6e6eaf3805 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -20,7 +20,6 @@ from netaddr import IPAddress from twisted.web.server import Request from synapse import event_auth -from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import ( AuthError, @@ -34,8 +33,6 @@ from synapse.http.site import SynapseRequest from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import Requester, UserID, create_requester -from synapse.util.caches.lrucache import LruCache -from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry if TYPE_CHECKING: from synapse.server import HomeServer @@ -47,10 +44,6 @@ logger = logging.getLogger(__name__) GUEST_DEVICE_ID = "guest_device" -class _InvalidMacaroonException(Exception): - pass - - class Auth: """ This class contains functions for authenticating users of our client-server API. 
@@ -62,16 +55,10 @@ class Auth: self.store = hs.get_datastores().main self._account_validity_handler = hs.get_account_validity_handler() self._storage_controllers = hs.get_storage_controllers() - - self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache( - 10000, "token_cache" - ) - - self._auth_blocking = AuthBlocking(self.hs) + self._macaroon_generator = hs.get_macaroon_generator() self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips - self._macaroon_secret_key = hs.config.key.macaroon_secret_key self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users async def check_user_in_room( @@ -126,7 +113,6 @@ class Auth: self, request: SynapseRequest, allow_guest: bool = False, - rights: str = "access", allow_expired: bool = False, ) -> Requester: """Get a registered user's ID. @@ -135,7 +121,6 @@ class Auth: request: An HTTP request with an access_token query parameter. allow_guest: If False, will raise an AuthError if the user making the request is a guest. - rights: The operation being performed; the access token must allow this allow_expired: If True, allow the request through even if the account is expired, or session token lifetime has ended. Note that /login will deliver access tokens regardless of expiration. 
@@ -150,7 +135,7 @@ class Auth: parent_span = active_span() with start_active_span("get_user_by_req"): requester = await self._wrapped_get_user_by_req( - request, allow_guest, rights, allow_expired + request, allow_guest, allow_expired ) if parent_span: @@ -176,7 +161,6 @@ class Auth: self, request: SynapseRequest, allow_guest: bool, - rights: str, allow_expired: bool, ) -> Requester: """Helper for get_user_by_req @@ -214,7 +198,7 @@ class Auth: return requester user_info = await self.get_user_by_access_token( - access_token, rights, allow_expired=allow_expired + access_token, allow_expired=allow_expired ) token_id = user_info.token_id is_guest = user_info.is_guest @@ -394,15 +378,12 @@ class Auth: async def get_user_by_access_token( self, token: str, - rights: str = "access", allow_expired: bool = False, ) -> TokenLookupResult: """Validate access token and get user_id from it Args: token: The access token to get the user by - rights: The operation being performed; the access token must - allow this allow_expired: If False, raises an InvalidClientTokenError if the token is expired @@ -413,70 +394,55 @@ class Auth: is invalid """ - if rights == "access": - # First look in the database to see if the access token is present - # as an opaque token. - r = await self.store.get_user_by_access_token(token) - if r: - valid_until_ms = r.valid_until_ms - if ( - not allow_expired - and valid_until_ms is not None - and valid_until_ms < self.clock.time_msec() - ): - # there was a valid access token, but it has expired. - # soft-logout the user. - raise InvalidClientTokenError( - msg="Access token has expired", soft_logout=True - ) + # First look in the database to see if the access token is present + # as an opaque token. 
+ r = await self.store.get_user_by_access_token(token) + if r: + valid_until_ms = r.valid_until_ms + if ( + not allow_expired + and valid_until_ms is not None + and valid_until_ms < self.clock.time_msec() + ): + # there was a valid access token, but it has expired. + # soft-logout the user. + raise InvalidClientTokenError( + msg="Access token has expired", soft_logout=True + ) - return r + return r # If the token isn't found in the database, then it could still be a - # macaroon, so we check that here. + # macaroon for a guest, so we check that here. try: - user_id, guest = self._parse_and_validate_macaroon(token, rights) - - if rights == "access": - if not guest: - # non-guest access tokens must be in the database - logger.warning("Unrecognised access token - not in store.") - raise InvalidClientTokenError() - - # Guest access tokens are not stored in the database (there can - # only be one access token per guest, anyway). - # - # In order to prevent guest access tokens being used as regular - # user access tokens (and hence getting around the invalidation - # process), we look up the user id and check that it is indeed - # a guest user. - # - # It would of course be much easier to store guest access - # tokens in the database as well, but that would break existing - # guest tokens. - stored_user = await self.store.get_user_by_id(user_id) - if not stored_user: - raise InvalidClientTokenError("Unknown user_id %s" % user_id) - if not stored_user["is_guest"]: - raise InvalidClientTokenError( - "Guest access token used for regular user" - ) - - ret = TokenLookupResult( - user_id=user_id, - is_guest=True, - # all guests get the same device id - device_id=GUEST_DEVICE_ID, + user_id = self._macaroon_generator.verify_guest_token(token) + + # Guest access tokens are not stored in the database (there can + # only be one access token per guest, anyway). 
+ # + # In order to prevent guest access tokens being used as regular + # user access tokens (and hence getting around the invalidation + # process), we look up the user id and check that it is indeed + # a guest user. + # + # It would of course be much easier to store guest access + # tokens in the database as well, but that would break existing + # guest tokens. + stored_user = await self.store.get_user_by_id(user_id) + if not stored_user: + raise InvalidClientTokenError("Unknown user_id %s" % user_id) + if not stored_user["is_guest"]: + raise InvalidClientTokenError( + "Guest access token used for regular user" ) - elif rights == "delete_pusher": - # We don't store these tokens in the database - ret = TokenLookupResult(user_id=user_id, is_guest=False) - else: - raise RuntimeError("Unknown rights setting %s", rights) - return ret + return TokenLookupResult( + user_id=user_id, + is_guest=True, + # all guests get the same device id + device_id=GUEST_DEVICE_ID, + ) except ( - _InvalidMacaroonException, pymacaroons.exceptions.MacaroonException, TypeError, ValueError, @@ -488,78 +454,6 @@ class Auth: ) raise InvalidClientTokenError("Invalid access token passed.") - def _parse_and_validate_macaroon( - self, token: str, rights: str = "access" - ) -> Tuple[str, bool]: - """Takes a macaroon and tries to parse and validate it. This is cached - if and only if rights == access and there isn't an expiry. - - On invalid macaroon raises _InvalidMacaroonException - - Returns: - (user_id, is_guest) - """ - if rights == "access": - cached = self.token_cache.get(token, None) - if cached: - return cached - - try: - macaroon = pymacaroons.Macaroon.deserialize(token) - except Exception: # deserialize can throw more-or-less anything - # The access token doesn't look like a macaroon. 
- raise _InvalidMacaroonException() - - try: - user_id = get_value_from_macaroon(macaroon, "user_id") - - guest = False - for caveat in macaroon.caveats: - if caveat.caveat_id == "guest = true": - guest = True - - self.validate_macaroon(macaroon, rights, user_id=user_id) - except ( - pymacaroons.exceptions.MacaroonException, - KeyError, - TypeError, - ValueError, - ): - raise InvalidClientTokenError("Invalid macaroon passed.") - - if rights == "access": - self.token_cache[token] = (user_id, guest) - - return user_id, guest - - def validate_macaroon( - self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str - ) -> None: - """ - validate that a Macaroon is understood by and was signed by this server. - - Args: - macaroon: The macaroon to validate - type_string: The kind of token required (e.g. "access", "delete_pusher") - user_id: The user_id required - """ - v = pymacaroons.Verifier() - - # the verifier runs a test for every caveat on the macaroon, to check - # that it is met for the current request. Each caveat must match at - # least one of the predicates specified by satisfy_exact or - # specify_general. 
- v.satisfy_exact("gen = 1") - v.satisfy_exact("type = " + type_string) - v.satisfy_exact("user_id = %s" % user_id) - v.satisfy_exact("guest = true") - satisfy_expiry(v, self.clock.time_msec) - - # access_tokens include a nonce for uniqueness: any value is acceptable - v.satisfy_general(lambda c: c.startswith("nonce = ")) - - v.verify(macaroon, self._macaroon_secret_key) - def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService: token = self.get_access_token_from_request(request) service = self.store.get_app_service_by_token(token) @@ -711,14 +605,3 @@ class Auth: "User %s not in room %s, and room previews are disabled" % (user_id, room_id), ) - - async def check_auth_blocking( - self, - user_id: Optional[str] = None, - threepid: Optional[dict] = None, - user_type: Optional[str] = None, - requester: Optional[Requester] = None, - ) -> None: - await self._auth_blocking.check_auth_blocking( - user_id=user_id, threepid=threepid, user_type=user_type, requester=requester - ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a3446ac6e8..84e389a6cd 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -37,7 +37,6 @@ from typing import ( ) from cryptography.utils import CryptographyDeprecationWarning -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import ParamSpec import twisted @@ -68,6 +67,7 @@ from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.types import ISynapseReactor +from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries from synapse.util.daemonize import daemonize_process from synapse.util.gai_resolver import GAIResolver @@ -540,7 +540,7 @@ def setup_sentry(hs: "HomeServer") -> None: sentry_sdk.init( dsn=hs.config.metrics.sentry_dsn, - 
release=get_distribution_version_string("matrix-synapse"), + release=SYNAPSE_VERSION, ) # We set some default tags that give some context to this instance diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 6fedf681f8..561621a285 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -19,8 +19,6 @@ import sys import tempfile from typing import List, Optional -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet import defer, task import synapse @@ -43,6 +41,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.server import HomeServer from synapse.storage.databases.main.room import RoomWorkerStore from synapse.types import StateMap +from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") @@ -220,7 +219,7 @@ def start(config_options: List[str]) -> None: ss = AdminCmdServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() 
-> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 885454ed44..7995d99825 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 
89f8998f0e..4a987fb759 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -16,8 +16,6 @@ import logging import sys from typing import Dict, List, Optional, Tuple -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet import address from twisted.web.resource import Resource @@ -121,6 +119,7 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import JsonDict +from synapse.util import SYNAPSE_VERSION from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") @@ -447,7 +446,7 @@ def start(config_options: List[str]) -> None: hs = GenericWorkerServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(hs, config, use_worker_options=True) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 4c6c0658ab..745e704141 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -18,8 +18,6 @@ import os import sys from typing import Dict, Iterable, List -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource from twisted.web.server import GzipEncoderFactory @@ -69,7 +67,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.util.check_dependencies import check_requirements +from synapse.util.check_dependencies import VERSION, check_requirements from synapse.util.httpresourcetree import 
create_resource_tree from synapse.util.module_loader import load_module @@ -371,7 +369,7 @@ def setup(config_options: List[str]) -> SynapseHomeServer: hs = SynapseHomeServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{VERSION}", ) synapse.config.logger.setup_logging(hs, config, use_worker_options=False) diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index de1bcee0a7..b6aed651ed 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 14bde27179..34f23c4e5d 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import 
LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 42364fc133..095eca16c5 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -18,6 +18,7 @@ import argparse import errno import logging import os +import re from collections import OrderedDict from hashlib import sha256 from textwrap import dedent @@ -123,7 +124,10 @@ CONFIG_FILE_HEADER = """\ # should have the same indentation. # # [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html - +# +# For more information on how to configure Synapse, including a complete accounting of +# each option, go to docs/usage/configuration/config_documentation.md or +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html """ @@ -470,7 +474,7 @@ class RootConfig: The yaml config file """ - return CONFIG_FILE_HEADER + "\n\n".join( + conf = CONFIG_FILE_HEADER + "\n".join( dedent(conf) for conf in self.invoke_all( "generate_config_section", @@ -485,6 +489,8 @@ class RootConfig: tls_private_key_path=tls_private_key_path, ).values() ) + conf = re.sub("\n{2,}", "\n", conf) + return conf @classmethod def load_config( diff --git a/synapse/config/api.py b/synapse/config/api.py index 2cc6305340..e46728e73f 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -31,54 +31,6 @@ class ApiConfig(Config): self.room_prejoin_state = list(self._get_prejoin_state_types(config)) self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False) - def generate_config_section(cls, **kwargs: Any) -> str: - formatted_default_state_types = "\n".join( - " # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES - ) - - return """\ - ## API Configuration ## - - # Controls for the state that is shared with users who receive an invite - # to a room - # - room_prejoin_state: - # By default, the 
following state event types are shared with users who - # receive invites to the room: - # -%(formatted_default_state_types)s - # - # Uncomment the following to disable these defaults (so that only the event - # types listed in 'additional_event_types' are shared). Defaults to 'false'. - # - #disable_default_event_types: true - - # Additional state event types to share with users when they are invited - # to a room. - # - # By default, this list is empty (so only the default event types are shared). - # - #additional_event_types: - # - org.example.custom.event.type - - # We record the IP address of clients used to access the API for various - # reasons, including displaying it to the user in the "Where you're signed in" - # dialog. - # - # By default, when puppeting another user via the admin API, the client IP - # address is recorded against the user who created the access token (ie, the - # admin user), and *not* the puppeted user. - # - # Uncomment the following to also record the IP address against the puppeted - # user. (This also means that the puppeted user will count as an "active" user - # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc - # above.) 
- # - #track_puppeted_user_ips: true - """ % { - "formatted_default_state_types": formatted_default_state_types - } - def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: """Get the event types to include in the prejoin state diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 16f93273b3..00182090b2 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -35,20 +35,6 @@ class AppServiceConfig(Config): self.app_service_config_files = config.get("app_service_config_files", []) self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) - def generate_config_section(cls, **kwargs: Any) -> str: - return """\ - # A list of application service config files to use - # - #app_service_config_files: - # - app_service_1.yaml - # - app_service_2.yaml - - # Uncomment to enable tracking of application service IP addresses. Implicitly - # enables MAU tracking for application service users. - # - #track_appservice_user_ips: true - """ - def load_appservices( hostname: str, config_files: List[str] diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 265a554a5d..35774962c0 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -53,78 +53,3 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - password_config: - # Uncomment to disable password login. - # Set to `only_for_reauth` to permit reauthentication for users that - # have passwords and are already logged in. - # - #enabled: false - - # Uncomment to disable authentication against the local password - # database. This is ignored if `enabled` is false, and is only useful - # if you have other password_providers. - # - #localdb_enabled: false - - # Uncomment and change to a secret random string for extra security. - # DO NOT CHANGE THIS AFTER INITIAL SETUP! 
- # - #pepper: "EVEN_MORE_SECRET" - - # Define and enforce a password policy. Each parameter is optional. - # This is an implementation of MSC2000. - # - policy: - # Whether to enforce the password policy. - # Defaults to 'false'. - # - #enabled: true - - # Minimum accepted length for a password. - # Defaults to 0. - # - #minimum_length: 15 - - # Whether a password must contain at least one digit. - # Defaults to 'false'. - # - #require_digit: true - - # Whether a password must contain at least one symbol. - # A symbol is any character that's not a number or a letter. - # Defaults to 'false'. - # - #require_symbol: true - - # Whether a password must contain at least one lowercase letter. - # Defaults to 'false'. - # - #require_lowercase: true - - # Whether a password must contain at least one uppercase letter. - # Defaults to 'false'. - # - #require_uppercase: true - - ui_auth: - # The amount of time to allow a user-interactive authentication session - # to be active. - # - # This defaults to 0, meaning the user is queried for their credentials - # before every action, but this can be overridden to allow a single - # validation to be re-used. This weakens the protections afforded by - # the user-interactive authentication process, by allowing for multiple - # (and potentially different) operations to use the same validation session. - # - # This is ignored for potentially "dangerous" operations (including - # deactivating an account, modifying an account password, and - # adding a 3PID). - # - # Uncomment below to allow for credential validation to last for 15 - # seconds. 
- # - #session_timeout: "15s" - """ diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py index 07fadbe041..1c6cd97de8 100644 --- a/synapse/config/background_updates.py +++ b/synapse/config/background_updates.py @@ -21,40 +21,6 @@ from ._base import Config class BackgroundUpdateConfig(Config): section = "background_updates" - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Background Updates ## - - # Background updates are database updates that are run in the background in batches. - # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to - # sleep can all be configured. This is helpful to speed up or slow down the updates. - # - background_updates: - # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set - # a time to change the default. - # - #background_update_duration_ms: 500 - - # Whether to sleep between updates. Defaults to True. Uncomment to change the default. - # - #sleep_enabled: false - - # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment - # and set a duration to change the default. - # - #sleep_duration_ms: 300 - - # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and - # set a size to change the default. - # - #min_batch_size: 10 - - # The batch size to use for the first iteration of a new background update. The default is 100. - # Uncomment and set a size to change the default. 
- # - #default_batch_size: 50 - """ - def read_config(self, config: JsonDict, **kwargs: Any) -> None: bg_update_config = config.get("background_updates") or {} diff --git a/synapse/config/cache.py b/synapse/config/cache.py index d2f55534d7..d0b491ea6c 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -113,97 +113,6 @@ class CacheConfig(Config): with _CACHES_LOCK: _CACHES.clear() - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Caching ## - - # Caching can be configured through the following options. - # - # A cache 'factor' is a multiplier that can be applied to each of - # Synapse's caches in order to increase or decrease the maximum - # number of entries that can be stored. - # - # The configuration for cache factors (caches.global_factor and - # caches.per_cache_factors) can be reloaded while the application is running, - # by sending a SIGHUP signal to the Synapse process. Changes to other parts of - # the caching config will NOT be applied after a SIGHUP is received; a restart - # is necessary. - - # The number of events to cache in memory. Not affected by - # caches.global_factor. - # - #event_cache_size: 10K - - caches: - # Controls the global cache factor, which is the default cache factor - # for all caches if a specific factor for that cache is not otherwise - # set. - # - # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment - # variable. Setting by environment variable takes priority over - # setting through the config file. - # - # Defaults to 0.5, which will half the size of all caches. - # - #global_factor: 1.0 - - # A dictionary of cache name to cache factor for that individual - # cache. Overrides the global cache factor for a given cache. - # - # These can also be set through environment variables comprised - # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital - # letters and underscores. Setting by environment variable - # takes priority over setting through the config file. 
- # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0 - # - # Some caches have '*' and other characters that are not - # alphanumeric or underscores. These caches can be named with or - # without the special characters stripped. For example, to specify - # the cache factor for `*stateGroupCache*` via an environment - # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - # - per_cache_factors: - #get_users_who_share_room_with_user: 2.0 - - # Controls whether cache entries are evicted after a specified time - # period. Defaults to true. Uncomment to disable this feature. - # - #expire_caches: false - - # If expire_caches is enabled, this flag controls how long an entry can - # be in a cache without having been accessed before being evicted. - # Defaults to 30m. Uncomment to set a different time to live for cache entries. - # - #cache_entry_ttl: 30m - - # This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`, - # `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain - # a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize - # this option, and all three of the options must be specified for this feature to work. - #cache_autotuning: - # This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted. - # They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in - # the flag below, or until the `min_cache_ttl` is hit. - #max_cache_memory_usage: 1024M - - # This flag sets a rough target for the desired memory usage of the caches. - #target_cache_memory_usage: 758M - - # 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when - # caches are actively being evicted/`max_cache_memory_usage` has been exceeded. 
This is to protect hot caches - # from being emptied while Synapse is evicting due to memory. - #min_cache_ttl: 5m - - # Controls how long the results of a /sync request are cached for after - # a successful response is returned. A higher duration can help clients with - # intermittent connections, at the cost of higher memory usage. - # - # By default, this is zero, which means that sync responses are not cached - # at all. - # - #sync_response_cache_duration: 2m - """ - def read_config(self, config: JsonDict, **kwargs: Any) -> None: """Populate this config object with values from `config`. diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 92c603f224..1737d5e327 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -45,30 +45,3 @@ class CaptchaConfig(Config): "https://www.recaptcha.net/recaptcha/api/siteverify", ) self.recaptcha_template = self.read_template("recaptcha.html") - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Captcha ## - # See docs/CAPTCHA_SETUP.md for full details of configuring this. - - # This homeserver's ReCAPTCHA public key. Must be specified if - # enable_registration_captcha is enabled. - # - #recaptcha_public_key: "YOUR_PUBLIC_KEY" - - # This homeserver's ReCAPTCHA private key. Must be specified if - # enable_registration_captcha is enabled. - # - #recaptcha_private_key: "YOUR_PRIVATE_KEY" - - # Uncomment to enable ReCaptcha checks when registering, preventing signup - # unless a captcha is answered. Requires a valid ReCaptcha - # public/private key. Defaults to 'false'. - # - #enable_registration_captcha: true - - # The API endpoint to use for verifying m.login.recaptcha responses. - # Defaults to "https://www.recaptcha.net/recaptcha/api/siteverify". 
- # - #recaptcha_siteverify_api: "https://my.recaptcha.site" - """ diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 8af0794ba4..9152c06bd6 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -53,37 +53,6 @@ class CasConfig(Config): self.cas_displayname_attribute = None self.cas_required_attributes = [] - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Enable Central Authentication Service (CAS) for registration and login. - # - cas_config: - # Uncomment the following to enable authorization against a CAS server. - # Defaults to false. - # - #enabled: true - - # The URL of the CAS authorization endpoint. - # - #server_url: "https://cas-server.com" - - # The attribute of the CAS response to use as the display name. - # - # If unset, no displayname will be set. - # - #displayname_attribute: name - - # It is possible to configure Synapse to only allow logins if CAS attributes - # match particular values. All of the keys in the mapping below must exist - # and the values must match the given value. Alternately if the given value - # is None then any value is allowed (the attribute just must exist). - # All of the listed attributes must match for the login to be permitted. - # - #required_attributes: - # userGroup: "staff" - # department: None - """ - # CAS uses a legacy required attributes mapping, not the one provided by # SsoAttributeRequirement. diff --git a/synapse/config/consent.py b/synapse/config/consent.py index 8ee3d34521..be74609dc4 100644 --- a/synapse/config/consent.py +++ b/synapse/config/consent.py @@ -20,58 +20,6 @@ from synapse.types import JsonDict from ._base import Config -DEFAULT_CONFIG = """\ -# User Consent configuration -# -# for detailed instructions, see -# https://matrix-org.github.io/synapse/latest/consent_tracking.html -# -# Parts of this section are required if enabling the 'consent' resource under -# 'listeners', in particular 'template_dir' and 'version'. 
-# -# 'template_dir' gives the location of the templates for the HTML forms. -# This directory should contain one subdirectory per language (eg, 'en', 'fr'), -# and each language directory should contain the policy document (named as -# '<version>.html') and a success page (success.html). -# -# 'version' specifies the 'current' version of the policy document. It defines -# the version to be served by the consent resource if there is no 'v' -# parameter. -# -# 'server_notice_content', if enabled, will send a user a "Server Notice" -# asking them to consent to the privacy policy. The 'server_notices' section -# must also be configured for this to work. Notices will *not* be sent to -# guest users unless 'send_server_notice_to_guests' is set to true. -# -# 'block_events_error', if set, will block any attempts to send events -# until the user consents to the privacy policy. The value of the setting is -# used as the text of the error. -# -# 'require_at_registration', if enabled, will add a step to the registration -# process, similar to how captcha works. Users will be required to accept the -# policy before their account is created. -# -# 'policy_name' is the display name of the policy users will see when registering -# for an account. Has no effect unless `require_at_registration` is enabled. -# Defaults to "Privacy Policy". 
-# -#user_consent: -# template_dir: res/templates/privacy -# version: 1.0 -# server_notice_content: -# msgtype: m.text -# body: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# send_server_notice_to_guests: true -# block_events_error: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# require_at_registration: false -# policy_name: Privacy Policy -# -""" - class ConsentConfig(Config): @@ -118,6 +66,3 @@ class ConsentConfig(Config): self.user_consent_policy_name = consent_config.get( "policy_name", "Privacy Policy" ) - - def generate_config_section(self, **kwargs: Any) -> str: - return DEFAULT_CONFIG diff --git a/synapse/config/database.py b/synapse/config/database.py index de0d3ca0f0..928fec8dfe 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -28,56 +28,6 @@ Ignoring 'database_path' setting: not using a sqlite3 database. """ DEFAULT_CONFIG = """\ -## Database ## - -# The 'database' setting defines the database that synapse uses to store all of -# its data. -# -# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or -# 'psycopg2' (for PostgreSQL). -# -# 'txn_limit' gives the maximum number of transactions to run per connection -# before reconnecting. Defaults to 0, which means no limit. -# -# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to -# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) -# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. 
You can find more information -# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here: -# https://wiki.postgresql.org/wiki/Locale_data_changes -# -# 'args' gives options which are passed through to the database engine, -# except for options starting 'cp_', which are used to configure the Twisted -# connection pool. For a reference to valid arguments, see: -# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect -# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS -# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ -# -# -# Example SQLite configuration: -# -#database: -# name: sqlite3 -# args: -# database: /path/to/homeserver.db -# -# -# Example Postgres configuration: -# -#database: -# name: psycopg2 -# txn_limit: 10000 -# args: -# user: synapse_user -# password: secretpassword -# database: synapse -# host: localhost -# port: 5432 -# cp_min: 5 -# cp_max: 10 -# -# For more information on using Synapse with Postgres, -# see https://matrix-org.github.io/synapse/latest/postgres.html. -# database: name: sqlite3 args: diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 5b5c2f4fff..c82f3ee7a3 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -357,160 +357,6 @@ class EmailConfig(Config): path=("email", "invite_client_location"), ) - def generate_config_section(self, **kwargs: Any) -> str: - return ( - """\ - # Configuration for sending emails from Synapse. - # - # Server admins can configure custom templates for email content. See - # https://matrix-org.github.io/synapse/latest/templates.html for more information. - # - email: - # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. - # - #smtp_host: mail.server - - # The port on the mail server for outgoing SMTP. Defaults to 25. 
- # - #smtp_port: 587 - - # Username/password for authentication to the SMTP server. By default, no - # authentication is attempted. - # - #smtp_user: "exampleusername" - #smtp_pass: "examplepassword" - - # Uncomment the following to require TLS transport security for SMTP. - # By default, Synapse will connect over plain text, and will then switch to - # TLS via STARTTLS *if the SMTP server supports it*. If this option is set, - # Synapse will refuse to connect unless the server supports STARTTLS. - # - #require_transport_security: true - - # Uncomment the following to disable TLS for SMTP. - # - # By default, if the server supports TLS, it will be used, and the server - # must present a certificate that is valid for 'smtp_host'. If this option - # is set to false, TLS will not be used. - # - #enable_tls: false - - # notif_from defines the "From" address to use when sending emails. - # It must be set if email sending is enabled. - # - # The placeholder '%%(app)s' will be replaced by the application name, - # which is normally 'app_name' (below), but may be overridden by the - # Matrix client application. - # - # Note that the placeholder must be written '%%(app)s', including the - # trailing 's'. - # - #notif_from: "Your Friendly %%(app)s homeserver <noreply@example.com>" - - # app_name defines the default value for '%%(app)s' in notif_from and email - # subjects. It defaults to 'Matrix'. - # - #app_name: my_branded_matrix_server - - # Uncomment the following to enable sending emails for messages that the user - # has missed. Disabled by default. - # - #enable_notifs: true - - # Uncomment the following to disable automatic subscription to email - # notifications for new users. Enabled by default. - # - #notif_for_new_users: false - - # Custom URL for client links within the email notifications. By default - # links will be based on "https://matrix.to". 
- # - # (This setting used to be called riot_base_url; the old name is still - # supported for backwards-compatibility but is now deprecated.) - # - #client_base_url: "http://localhost/riot" - - # Configure the time that a validation email will expire after sending. - # Defaults to 1h. - # - #validation_token_lifetime: 15m - - # The web client location to direct users to during an invite. This is passed - # to the identity server as the org.matrix.web_client_location key. Defaults - # to unset, giving no guidance to the identity server. - # - #invite_client_location: https://app.element.io - - # Subjects to use when sending emails from Synapse. - # - # The placeholder '%%(app)s' will be replaced with the value of the 'app_name' - # setting above, or by a value dictated by the Matrix client application. - # - # If a subject isn't overridden in this configuration file, the value used as - # its example will be used. - # - #subjects: - - # Subjects for notification emails. - # - # On top of the '%%(app)s' placeholder, these can use the following - # placeholders: - # - # * '%%(person)s', which will be replaced by the display name of the user(s) - # that sent the message(s), e.g. "Alice and Bob". - # * '%%(room)s', which will be replaced by the name of the room the - # message(s) have been sent to, e.g. "My super room". - # - # See the example provided for each setting to see which placeholder can be - # used and how to use them. - # - # Subject to use to notify about one message from one or more user(s) in a - # room which has a name. - #message_from_person_in_room: "%(message_from_person_in_room)s" - # - # Subject to use to notify about one message from one or more user(s) in a - # room which doesn't have a name. - #message_from_person: "%(message_from_person)s" - # - # Subject to use to notify about multiple messages from one or more users in - # a room which doesn't have a name. 
- #messages_from_person: "%(messages_from_person)s" - # - # Subject to use to notify about multiple messages in a room which has a - # name. - #messages_in_room: "%(messages_in_room)s" - # - # Subject to use to notify about multiple messages in multiple rooms. - #messages_in_room_and_others: "%(messages_in_room_and_others)s" - # - # Subject to use to notify about multiple messages from multiple persons in - # multiple rooms. This is similar to the setting above except it's used when - # the room in which the notification was triggered has no name. - #messages_from_person_and_others: "%(messages_from_person_and_others)s" - # - # Subject to use to notify about an invite to a room which has a name. - #invite_from_person_to_room: "%(invite_from_person_to_room)s" - # - # Subject to use to notify about an invite to a room which doesn't have a - # name. - #invite_from_person: "%(invite_from_person)s" - - # Subject for emails related to account administration. - # - # On top of the '%%(app)s' placeholder, these one can use the - # '%%(server_name)s' placeholder, which will be replaced by the value of the - # 'server_name' setting in your Synapse configuration. - # - # Subject to use when sending a password reset email. - #password_reset: "%(password_reset)s" - # - # Subject to use when sending a verification email to assert an address's - # ownership. - #email_validation: "%(email_validation)s" - """ - % DEFAULT_SUBJECTS - ) - class ThreepidBehaviour(Enum): """ diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index f2dfd49b07..0a285dba31 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,3 +84,6 @@ class ExperimentalConfig(Config): # MSC3772: A push rule for mutual relations. self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False) + + # MSC3715: dir param on /relations. 
+ self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False) diff --git a/synapse/config/federation.py b/synapse/config/federation.py index f83f93c0ef..336fca578a 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -49,44 +49,5 @@ class FederationConfig(Config): "allow_device_name_lookup_over_federation", False ) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Federation ## - - # Restrict federation to the following whitelist of domains. - # N.B. we recommend also firewalling your federation listener to limit - # inbound federation traffic as early as possible, rather than relying - # purely on this application-layer restriction. If not specified, the - # default is to whitelist everything. - # - #federation_domain_whitelist: - # - lon.example.com - # - nyc.example.com - # - syd.example.com - - # Report prometheus metrics on the age of PDUs being sent to and received from - # the following domains. This can be used to give an idea of "delay" on inbound - # and outbound federation, though be aware that any delay can be due to problems - # at either end or with the intermediate network. - # - # By default, no domains are monitored in this way. - # - #federation_metrics_domains: - # - matrix.org - # - example.com - - # Uncomment to disable profile lookup over federation. By default, the - # Federation API allows other homeservers to obtain profile data of any user - # on this homeserver. Defaults to 'true'. - # - #allow_profile_lookup_over_federation: false - - # Uncomment to allow device display name lookup over federation. By default, the - # Federation API prevents other homeservers from obtaining the display names of - # user devices on this homeserver. Defaults to 'false'. 
- # - #allow_device_name_lookup_over_federation: true - """ - _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/config/groups.py b/synapse/config/groups.py new file mode 100644 index 0000000000..baa051fdd4 --- /dev/null +++ b/synapse/config/groups.py @@ -0,0 +1,27 @@ +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +from synapse.types import JsonDict + +from ._base import Config + + +class GroupsConfig(Config): + section = "groups" + + def read_config(self, config: JsonDict, **kwargs: Any) -> None: + self.enable_group_creation = config.get("enable_group_creation", False) + self.group_creation_prefix = config.get("group_creation_prefix", "") diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index 2a756d1a7c..7e3c764b2c 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -55,67 +55,3 @@ class JWTConfig(Config): self.jwt_subject_claim = None self.jwt_issuer = None self.jwt_audiences = None - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # JSON web token integration. The following settings can be used to make - # Synapse JSON web tokens for authentication, instead of its internal - # password database. - # - # Each JSON Web Token needs to contain a "sub" (subject) claim, which is - # used as the localpart of the mxid. 
- # - # Additionally, the expiration time ("exp"), not before time ("nbf"), - # and issued at ("iat") claims are validated if present. - # - # Note that this is a non-standard login type and client support is - # expected to be non-existent. - # - # See https://matrix-org.github.io/synapse/latest/jwt.html. - # - #jwt_config: - # Uncomment the following to enable authorization using JSON web - # tokens. Defaults to false. - # - #enabled: true - - # This is either the private shared secret or the public key used to - # decode the contents of the JSON web token. - # - # Required if 'enabled' is true. - # - #secret: "provided-by-your-issuer" - - # The algorithm used to sign the JSON web token. - # - # Supported algorithms are listed at - # https://pyjwt.readthedocs.io/en/latest/algorithms.html - # - # Required if 'enabled' is true. - # - #algorithm: "provided-by-your-issuer" - - # Name of the claim containing a unique identifier for the user. - # - # Optional, defaults to `sub`. - # - #subject_claim: "sub" - - # The issuer to validate the "iss" claim against. - # - # Optional, if provided the "iss" claim will be required and - # validated for all JSON web tokens. - # - #issuer: "provided-by-your-issuer" - - # A list of audiences to validate the "aud" claim against. - # - # Optional, if provided the "aud" claim will be required and - # validated for all JSON web tokens. - # - # Note that if the "aud" claim is included in a JSON web token then - # validation will fail without configuring audiences. 
- # - #audiences: - # - "provided-by-your-issuer" - """ diff --git a/synapse/config/key.py b/synapse/config/key.py index ada65f6dd6..cc75efdf8f 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -159,16 +159,18 @@ class KeyConfig(Config): ) ) - self.macaroon_secret_key = config.get( + macaroon_secret_key: Optional[str] = config.get( "macaroon_secret_key", self.root.registration.registration_shared_secret ) - if not self.macaroon_secret_key: + if not macaroon_secret_key: # Unfortunately, there are people out there that don't have this # set. Lets just be "nice" and derive one from their secret key. logger.warning("Config is missing macaroon_secret_key") seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() + else: + self.macaroon_secret_key = macaroon_secret_key.encode("utf-8") # a secret which is used to calculate HMACs for form values, to stop # falsification of values @@ -182,111 +184,22 @@ class KeyConfig(Config): **kwargs: Any, ) -> str: base_key_name = os.path.join(config_dir_path, server_name) + macaroon_secret_key = "" + form_secret = "" if generate_secrets: macaroon_secret_key = 'macaroon_secret_key: "%s"' % ( random_string_with_symbols(50), ) form_secret = 'form_secret: "%s"' % random_string_with_symbols(50) - else: - macaroon_secret_key = "#macaroon_secret_key: <PRIVATE STRING>" - form_secret = "#form_secret: <PRIVATE STRING>" return ( """\ - # a secret which is used to sign access tokens. If none is specified, - # the registration_shared_secret is used, if one is given; otherwise, - # a secret key is derived from the signing key. - # %(macaroon_secret_key)s - - # a secret which is used to calculate HMACs for form values, to stop - # falsification of values. Must be specified for the User Consent - # forms to work. 
- # %(form_secret)s - - ## Signing Keys ## - - # Path to the signing key to sign messages with - # signing_key_path: "%(base_key_name)s.signing.key" - - # The keys that the server used to sign messages with but won't use - # to sign new messages. - # - old_signing_keys: - # For each key, `key` should be the base64-encoded public key, and - # `expired_ts`should be the time (in milliseconds since the unix epoch) that - # it was last used. - # - # It is possible to build an entry from an old signing.key file using the - # `export_signing_key` script which is provided with synapse. - # - # For example: - # - #"ed25519:id": { key: "base64string", expired_ts: 123456789123 } - - # How long key response published by this server is valid for. - # Used to set the valid_until_ts in /key/v2 APIs. - # Determines how quickly servers will query to check which keys - # are still valid. - # - #key_refresh_interval: 1d - - # The trusted servers to download signing keys from. - # - # When we need to fetch a signing key, each server is tried in parallel. - # - # Normally, the connection to the key server is validated via TLS certificates. - # Additional security can be provided by configuring a `verify key`, which - # will make synapse check that the response is signed by that key. - # - # This setting supercedes an older setting named `perspectives`. The old format - # is still supported for backwards-compatibility, but it is deprecated. - # - # 'trusted_key_servers' defaults to matrix.org, but using it will generate a - # warning on start-up. To suppress this warning, set - # 'suppress_key_server_warning' to true. - # - # Options for each entry in the list include: - # - # server_name: the name of the server. required. - # - # verify_keys: an optional map from key id to base64-encoded public key. - # If specified, we will check that the response is signed by at least - # one of the given keys. - # - # accept_keys_insecurely: a boolean. 
Normally, if `verify_keys` is unset, - # and federation_verify_certificates is not `true`, synapse will refuse - # to start, because this would allow anyone who can spoof DNS responses - # to masquerade as the trusted key server. If you know what you are doing - # and are sure that your network environment provides a secure connection - # to the key server, you can set this to `true` to override this - # behaviour. - # - # An example configuration might look like: - # - #trusted_key_servers: - # - server_name: "my_trusted_server.example.com" - # verify_keys: - # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" - # - server_name: "my_other_trusted_server.example.com" - # trusted_key_servers: - server_name: "matrix.org" - - # Uncomment the following to disable the warning that is emitted when the - # trusted_key_servers include 'matrix.org'. See above. - # - #suppress_key_server_warning: true - - # The signing keys to use when acting as a trusted key server. If not specified - # defaults to the server signing key. - # - # Can contain multiple keys, one per line. 
- # - #key_server_signing_keys_path: "key_server_signing_keys.key" """ % locals() ) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 470b8b4492..6c1f78f8df 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -22,7 +22,6 @@ from string import Template from typing import TYPE_CHECKING, Any, Dict, Optional import yaml -from matrix_common.versionstring import get_distribution_version_string from zope.interface import implementer from twisted.logger import ( @@ -37,6 +36,7 @@ from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter from synapse.types import JsonDict +from ..util import SYNAPSE_VERSION from ._base import Config, ConfigError if TYPE_CHECKING: @@ -153,11 +153,6 @@ class LoggingConfig(Config): log_config = os.path.join(config_dir_path, server_name + ".log.config") return ( """\ - ## Logging ## - - # A yaml python logging config file as described by - # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema - # log_config: "%(log_config)s" """ % locals() @@ -349,7 +344,7 @@ def setup_logging( logging.warning( "Server %s version %s", sys.argv[0], - get_distribution_version_string("matrix-synapse"), + SYNAPSE_VERSION, ) logging.info("Server hostname: %s", config.server.server_name) logging.info("Instance name: %s", hs.get_instance_name()) diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index aa360a417c..d636507886 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -73,46 +73,8 @@ class MetricsConfig(Config): def generate_config_section( self, report_stats: Optional[bool] = None, **kwargs: Any ) -> str: - res = """\ - ## Metrics ### - - # Enable collection and rendering of performance metrics - # - #enable_metrics: false - - # Enable sentry integration - # NOTE: While attempts are made to ensure that the logs don't contain - # any sensitive information, this cannot be guaranteed. 
By enabling - # this option the sentry server may therefore receive sensitive - # information, and it in turn may then diseminate sensitive information - # through insecure notification channels if so configured. - # - #sentry: - # dsn: "..." - - # Flags to enable Prometheus metrics which are not suitable to be - # enabled by default, either for performance reasons or limited use. - # - metrics_flags: - # Publish synapse_federation_known_servers, a gauge of the number of - # servers this homeserver knows about, including itself. May cause - # performance problems on large homeservers. - # - #known_servers: true - - # Whether or not to report anonymized homeserver usage statistics. - # - """ - - if report_stats is None: - res += "#report_stats: true|false\n" + if report_stats is not None: + res = "report_stats: %s\n" % ("true" if report_stats else "false") else: - res += "report_stats: %s\n" % ("true" if report_stats else "false") - - res += """ - # The endpoint to report the anonymized homeserver usage statistics to. - # Defaults to https://matrix.org/report-usage-stats/push - # - #report_stats_endpoint: https://example.com/report-usage-stats/push - """ + res = "\n" return res diff --git a/synapse/config/modules.py b/synapse/config/modules.py index 0915014f7d..903637be8e 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -31,20 +31,3 @@ class ModulesConfig(Config): raise ConfigError("expected a mapping", config_path) self.loaded_modules.append(load_module(module, config_path)) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - ## Modules ## - - # Server admins can expand Synapse's functionality with external modules. - # - # See https://matrix-org.github.io/synapse/latest/modules/index.html for more - # documentation on how to configure or create custom modules for Synapse. 
- # - modules: - #- module: my_super_module.MySuperClass - # config: - # do_thing: true - #- module: my_other_super_module.SomeClass - # config: {} - """ diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index e9edea0731..0d32aba70a 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -143,29 +143,6 @@ class OembedConfig(Config): ) return re.compile(pattern) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # oEmbed allows for easier embedding content from a website. It can be - # used for generating URLs previews of services which support it. - # - oembed: - # A default list of oEmbed providers is included with Synapse. - # - # Uncomment the following to disable using these default oEmbed URLs. - # Defaults to 'false'. - # - #disable_default_providers: true - - # Additional files with oEmbed configuration (each should be in the - # form of providers.json). - # - # By default, this list is empty (so only the default providers.json - # is used). - # - #additional_providers: - # - oembed/my_providers.json - """ - _OEMBED_PROVIDER_SCHEMA = { "type": "array", diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index b9c40522d8..98e8cd8b5a 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -66,203 +66,6 @@ class OIDCConfig(Config): # OIDC is enabled if we have a provider return bool(self.oidc_providers) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration - # and login. - # - # Options for each entry include: - # - # idp_id: a unique identifier for this identity provider. Used internally - # by Synapse; should be a single word such as 'github'. - # - # Note that, if this is changed, users authenticating via that provider - # will no longer be recognised as the same user! - # - # (Use "oidc" here if you are migrating from an old "oidc_config" - # configuration.) 
- # - # idp_name: A user-facing name for this identity provider, which is used to - # offer the user a choice of login mechanisms. - # - # idp_icon: An optional icon for this identity provider, which is presented - # by clients and Synapse's own IdP picker page. If given, must be an - # MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to - # obtain such an MXC URI is to upload an image to an (unencrypted) room - # and then copy the "url" from the source of the event.) - # - # idp_brand: An optional brand for this identity provider, allowing clients - # to style the login flow according to the identity provider in question. - # See the spec for possible options here. - # - # discover: set to 'false' to disable the use of the OIDC discovery mechanism - # to discover endpoints. Defaults to true. - # - # issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery - # is enabled) to discover the provider's endpoints. - # - # client_id: Required. oauth2 client id to use. - # - # client_secret: oauth2 client secret to use. May be omitted if - # client_secret_jwt_key is given, or if client_auth_method is 'none'. - # - # client_secret_jwt_key: Alternative to client_secret: details of a key used - # to create a JSON Web Token to be used as an OAuth2 client secret. If - # given, must be a dictionary with the following properties: - # - # key: a pem-encoded signing key. Must be a suitable key for the - # algorithm specified. Required unless 'key_file' is given. - # - # key_file: the path to file containing a pem-encoded signing key file. - # Required unless 'key' is given. - # - # jwt_header: a dictionary giving properties to include in the JWT - # header. Must include the key 'alg', giving the algorithm used to - # sign the JWT, such as "ES256", using the JWA identifiers in - # RFC7518. - # - # jwt_payload: an optional dictionary giving properties to include in - # the JWT payload. Normally this should include an 'iss' key. 
- # - # client_auth_method: auth method to use when exchanging the token. Valid - # values are 'client_secret_basic' (default), 'client_secret_post' and - # 'none'. - # - # scopes: list of scopes to request. This should normally include the "openid" - # scope. Defaults to ["openid"]. - # - # authorization_endpoint: the oauth2 authorization endpoint. Required if - # provider discovery is disabled. - # - # token_endpoint: the oauth2 token endpoint. Required if provider discovery is - # disabled. - # - # userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is - # disabled and the 'openid' scope is not requested. - # - # jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and - # the 'openid' scope is used. - # - # skip_verification: set to 'true' to skip metadata verification. Use this if - # you are connecting to a provider that is not OpenID Connect compliant. - # Defaults to false. Avoid this in production. - # - # user_profile_method: Whether to fetch the user profile from the userinfo - # endpoint, or to rely on the data returned in the id_token from the - # token_endpoint. - # - # Valid values are: 'auto' or 'userinfo_endpoint'. - # - # Defaults to 'auto', which uses the userinfo endpoint if 'openid' is - # not included in 'scopes'. Set to 'userinfo_endpoint' to always use the - # userinfo endpoint. - # - # allow_existing_users: set to 'true' to allow a user logging in via OIDC to - # match a pre-existing account instead of failing. This could be used if - # switching from password logins to OIDC. Defaults to false. - # - # user_mapping_provider: Configuration for how attributes returned from a OIDC - # provider are mapped onto a matrix user. This setting has the following - # sub-properties: - # - # module: The class name of a custom mapping module. Default is - # {mapping_provider!r}. 
- # See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers - # for information on implementing a custom mapping provider. - # - # config: Configuration for the mapping provider module. This section will - # be passed as a Python dictionary to the user mapping provider - # module's `parse_config` method. - # - # For the default provider, the following settings are available: - # - # subject_claim: name of the claim containing a unique identifier - # for the user. Defaults to 'sub', which OpenID Connect - # compliant providers should provide. - # - # localpart_template: Jinja2 template for the localpart of the MXID. - # If this is not set, the user will be prompted to choose their - # own username (see the documentation for the - # 'sso_auth_account_details.html' template). This template can - # use the 'localpart_from_email' filter. - # - # confirm_localpart: Whether to prompt the user to validate (or - # change) the generated localpart (see the documentation for the - # 'sso_auth_account_details.html' template), instead of - # registering the account right away. - # - # display_name_template: Jinja2 template for the display name to set - # on first login. If unset, no displayname will be set. - # - # email_template: Jinja2 template for the email address of the user. - # If unset, no email address will be added to the account. - # - # extra_attributes: a map of Jinja2 templates for extra attributes - # to send back to the client during login. - # Note that these are non-standard and clients will ignore them - # without modifications. - # - # When rendering, the Jinja2 templates are given a 'user' variable, - # which is set to the claims returned by the UserInfo Endpoint and/or - # in the ID Token. - # - # It is possible to configure Synapse to only allow logins if certain attributes - # match particular values in the OIDC userinfo. The requirements can be listed under - # `attribute_requirements` as shown below. 
All of the listed attributes must - # match for the login to be permitted. Additional attributes can be added to - # userinfo by expanding the `scopes` section of the OIDC config to retrieve - # additional information from the OIDC provider. - # - # If the OIDC claim is a list, then the attribute must match any value in the list. - # Otherwise, it must exactly match the value of the claim. Using the example - # below, the `family_name` claim MUST be "Stephensson", but the `groups` - # claim MUST contain "admin". - # - # attribute_requirements: - # - attribute: family_name - # value: "Stephensson" - # - attribute: groups - # value: "admin" - # - # See https://matrix-org.github.io/synapse/latest/openid.html - # for information on how to configure these options. - # - # For backwards compatibility, it is also possible to configure a single OIDC - # provider via an 'oidc_config' setting. This is now deprecated and admins are - # advised to migrate to the 'oidc_providers' format. (When doing that migration, - # use 'oidc' for the idp_id to ensure that existing users continue to be - # recognised.) 
- # - oidc_providers: - # Generic example - # - #- idp_id: my_idp - # idp_name: "My OpenID provider" - # idp_icon: "mxc://example.com/mediaid" - # discover: false - # issuer: "https://accounts.example.com/" - # client_id: "provided-by-your-issuer" - # client_secret: "provided-by-your-issuer" - # client_auth_method: client_secret_post - # scopes: ["openid", "profile"] - # authorization_endpoint: "https://accounts.example.com/oauth2/auth" - # token_endpoint: "https://accounts.example.com/oauth2/token" - # userinfo_endpoint: "https://accounts.example.com/userinfo" - # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" - # skip_verification: true - # user_mapping_provider: - # config: - # subject_claim: "id" - # localpart_template: "{{{{ user.login }}}}" - # display_name_template: "{{{{ user.name }}}}" - # email_template: "{{{{ user.email }}}}" - # attribute_requirements: - # - attribute: userGroup - # value: "synapseUsers" - """.format( - mapping_provider=DEFAULT_USER_MAPPING_PROVIDER - ) - # jsonschema definition of the configuration settings for an oidc identity provider OIDC_PROVIDER_CONFIG_SCHEMA = { diff --git a/synapse/config/push.py b/synapse/config/push.py index 2e796d1c46..979b128eae 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -49,36 +49,3 @@ class PushConfig(Config): "please set push.include_content instead" ) self.push_include_content = not redact_content - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - ## Push ## - - push: - # Clients requesting push notifications can either have the body of - # the message sent in the notification poke along with other details - # like the sender, or just the event ID and room ID (`event_id_only`). - # If clients choose the former, this option controls whether the - # notification request includes the content of the event (other details - # like the sender are still included). For `event_id_only` push, it - # has no effect. 
- # - # For modern android devices the notification content will still appear - # because it is loaded by the app. iPhone, however will send a - # notification saying only that a message arrived and who it came from. - # - # The default value is "true" to include message details. Uncomment to only - # include the event ID and room ID in push notification payloads. - # - #include_content: false - - # When a push notification is received, an unread count is also sent. - # This number can either be calculated as the number of unread messages - # for the user, or the number of *rooms* the user has unread messages in. - # - # The default value is "true", meaning push clients will see the number of - # rooms with unread messages in them. Uncomment to instead send the number - # of unread messages. - # - #group_unread_count_by_room: false - """ diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 0587f5c10f..d4090a1f9a 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -143,125 +143,3 @@ class RatelimitConfig(Config): "burst_count": self.rc_message.burst_count, }, ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Ratelimiting ## - - # Ratelimiting settings for client actions (registration, login, messaging). - # - # Each ratelimiting configuration is made of two parameters: - # - per_second: number of requests a client can send per second. - # - burst_count: number of requests a client can send before being throttled. - # - # Synapse currently uses the following configurations: - # - one for messages that ratelimits sending based on the account the client - # is using - # - one for registration that ratelimits registration requests based on the - # client's IP address. - # - one for checking the validity of registration tokens that ratelimits - # requests based on the client's IP address. - # - one for login that ratelimits login requests based on the client's IP - # address. 
- # - one for login that ratelimits login requests based on the account the - # client is attempting to log into. - # - one for login that ratelimits login requests based on the account the - # client is attempting to log into, based on the amount of failed login - # attempts for this account. - # - one for ratelimiting redactions by room admins. If this is not explicitly - # set then it uses the same ratelimiting as per rc_message. This is useful - # to allow room admins to deal with abuse quickly. - # - two for ratelimiting number of rooms a user can join, "local" for when - # users are joining rooms the server is already in (this is cheap) vs - # "remote" for when users are trying to join rooms not on the server (which - # can be more expensive) - # - one for ratelimiting how often a user or IP can attempt to validate a 3PID. - # - two for ratelimiting how often invites can be sent in a room or to a - # specific user. - # - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID - # such as an email address or a phone number) based on the account that's - # sending the invite. - # - # The defaults are as shown below. 
- # - #rc_message: - # per_second: 0.2 - # burst_count: 10 - # - #rc_registration: - # per_second: 0.17 - # burst_count: 3 - # - #rc_registration_token_validity: - # per_second: 0.1 - # burst_count: 5 - # - #rc_login: - # address: - # per_second: 0.17 - # burst_count: 3 - # account: - # per_second: 0.17 - # burst_count: 3 - # failed_attempts: - # per_second: 0.17 - # burst_count: 3 - # - #rc_admin_redaction: - # per_second: 1 - # burst_count: 50 - # - #rc_joins: - # local: - # per_second: 0.1 - # burst_count: 10 - # remote: - # per_second: 0.01 - # burst_count: 10 - # - #rc_3pid_validation: - # per_second: 0.003 - # burst_count: 5 - # - #rc_invites: - # per_room: - # per_second: 0.3 - # burst_count: 10 - # per_user: - # per_second: 0.003 - # burst_count: 5 - # - #rc_third_party_invite: - # per_second: 0.2 - # burst_count: 10 - - # Ratelimiting settings for incoming federation - # - # The rc_federation configuration is made up of the following settings: - # - window_size: window size in milliseconds - # - sleep_limit: number of federation requests from a single server in - # a window before the server will delay processing the request. - # - sleep_delay: duration in milliseconds to delay processing events - # from remote servers by if they go over the sleep limit. - # - reject_limit: maximum number of concurrent federation requests - # allowed from a single server - # - concurrent: number of federation requests to concurrently process - # from a single server - # - # The defaults are as shown below. - # - #rc_federation: - # window_size: 1000 - # sleep_limit: 10 - # sleep_delay: 500 - # reject_limit: 50 - # concurrent: 3 - - # Target outgoing federation transaction frequency for sending read-receipts, - # per-room. - # - # If we end up trying to send out more read-receipts, they will get buffered up - # into fewer transactions. 
- # - #federation_rr_transactions_per_room_per_second: 50 - """ diff --git a/synapse/config/redis.py b/synapse/config/redis.py index ec7a735418..b42dd2e93a 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -34,24 +34,3 @@ class RedisConfig(Config): self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) self.redis_password = redis_config.get("password") - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Configuration for Redis when using workers. This *must* be enabled when - # using workers (unless using old style direct TCP configuration). - # - redis: - # Uncomment the below to enable Redis support. - # - #enabled: true - - # Optional host and port to use to connect to redis. Defaults to - # localhost and 6379 - # - #host: localhost - #port: 6379 - - # Optional password if configured on the Redis instance - # - #password: <secret_password> - """ diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d2d0425e62..fcf99be092 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -206,284 +206,9 @@ class RegistrationConfig(Config): registration_shared_secret = 'registration_shared_secret: "%s"' % ( random_string_with_symbols(50), ) + return registration_shared_secret else: - registration_shared_secret = "#registration_shared_secret: <PRIVATE STRING>" - - return ( - """\ - ## Registration ## - # - # Registration can be rate-limited using the parameters in the "Ratelimiting" - # section of this file. - - # Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration, - # you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration - # without any verification, you must also set `enable_registration_without_verification`, found below. 
- # - #enable_registration: false - - # Enable registration without email or captcha verification. Note: this option is *not* recommended, - # as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect - # unless `enable_registration` is also enabled. - # - #enable_registration_without_verification: true - - # Time that a user's session remains valid for, after they log in. - # - # Note that this is not currently compatible with guest logins. - # - # Note also that this is calculated at login time: changes are not applied - # retrospectively to users who have already logged in. - # - # By default, this is infinite. - # - #session_lifetime: 24h - - # Time that an access token remains valid for, if the session is - # using refresh tokens. - # For more information about refresh tokens, please see the manual. - # Note that this only applies to clients which advertise support for - # refresh tokens. - # - # Note also that this is calculated at login time and refresh time: - # changes are not applied to existing sessions until they are refreshed. - # - # By default, this is 5 minutes. - # - #refreshable_access_token_lifetime: 5m - - # Time that a refresh token remains valid for (provided that it is not - # exchanged for another one first). - # This option can be used to automatically log-out inactive sessions. - # Please see the manual for more information. - # - # Note also that this is calculated at login time and refresh time: - # changes are not applied to existing sessions until they are refreshed. - # - # By default, this is infinite. - # - #refresh_token_lifetime: 24h - - # Time that an access token remains valid for, if the session is NOT - # using refresh tokens. - # Please note that not all clients support refresh tokens, so setting - # this to a short value may be inconvenient for some users who will - # then be logged out frequently. 
- # - # Note also that this is calculated at login time: changes are not applied - # retrospectively to existing sessions for users that have already logged in. - # - # By default, this is infinite. - # - #nonrefreshable_access_token_lifetime: 24h - - # The user must provide all of the below types of 3PID when registering. - # - #registrations_require_3pid: - # - email - # - msisdn - - # Explicitly disable asking for MSISDNs from the registration - # flow (overrides registrations_require_3pid if MSISDNs are set as required) - # - #disable_msisdn_registration: true - - # Mandate that users are only allowed to associate certain formats of - # 3PIDs with accounts on this server. - # - #allowed_local_3pids: - # - medium: email - # pattern: '^[^@]+@matrix\\.org$' - # - medium: email - # pattern: '^[^@]+@vector\\.im$' - # - medium: msisdn - # pattern: '\\+44' - - # Enable 3PIDs lookup requests to identity servers from this server. - # - #enable_3pid_lookup: true - - # Require users to submit a token during registration. - # Tokens can be managed using the admin API: - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/registration_tokens.html - # Note that `enable_registration` must be set to `true`. - # Disabling this option will not delete any tokens previously generated. - # Defaults to false. Uncomment the following to require tokens: - # - #registration_requires_token: true - - # Allow users to submit a token during registration to bypass any required 3pid - # steps configured in `registrations_require_3pid`. - # Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow. - # - #enable_registration_token_3pid_bypass: false - - # If set, allows registration of standard or admin accounts by anyone who - # has the shared secret, even if registration is otherwise disabled. - # - %(registration_shared_secret)s - - # Set the number of bcrypt rounds used to generate password hash. 
- # Larger numbers increase the work factor needed to generate the hash. - # The default number is 12 (which equates to 2^12 rounds). - # N.B. that increasing this will exponentially increase the time required - # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins. - # - #bcrypt_rounds: 12 - - # Allows users to register as guests without a password/email/etc, and - # participate in rooms hosted on this server which have been made - # accessible to anonymous users. - # - #allow_guest_access: false - - # The identity server which we suggest that clients should use when users log - # in on this server. - # - # (By default, no suggestion is made, so it is left up to the client. - # This setting is ignored unless public_baseurl is also explicitly set.) - # - #default_identity_server: https://matrix.org - - # Handle threepid (email/phone etc) registration and password resets through a set of - # *trusted* identity servers. Note that this allows the configured identity server to - # reset passwords for accounts! - # - # Be aware that if `email` is not set, and SMTP options have not been - # configured in the email config block, registration and user password resets via - # email will be globally disabled. - # - # Additionally, if `msisdn` is not set, registration and password resets via msisdn - # will be disabled regardless, and users will not be able to associate an msisdn - # identifier to their account. This is due to Synapse currently not supporting - # any method of sending SMS messages on its own. - # - # To enable using an identity server for operations regarding a particular third-party - # identifier type, set the value to the URL of that identity server as shown in the - # examples below. 
- # - # Servers handling the these requests must answer the `/requestToken` endpoints defined - # by the Matrix Identity Service API specification: - # https://matrix.org/docs/spec/identity_service/latest - # - account_threepid_delegates: - #email: https://example.com # Delegate email sending to example.com - #msisdn: http://localhost:8090 # Delegate SMS sending to this local process - - # Whether users are allowed to change their displayname after it has - # been initially set. Useful when provisioning users based on the - # contents of a third-party directory. - # - # Does not apply to server administrators. Defaults to 'true' - # - #enable_set_displayname: false - - # Whether users are allowed to change their avatar after it has been - # initially set. Useful when provisioning users based on the contents - # of a third-party directory. - # - # Does not apply to server administrators. Defaults to 'true' - # - #enable_set_avatar_url: false - - # Whether users can change the 3PIDs associated with their accounts - # (email address and msisdn). - # - # Defaults to 'true' - # - #enable_3pid_changes: false - - # Users who register on this homeserver will automatically be joined - # to these rooms. - # - # By default, any room aliases included in this list will be created - # as a publicly joinable room when the first user registers for the - # homeserver. This behaviour can be customised with the settings below. - # If the room already exists, make certain it is a publicly joinable - # room. The join rule of the room must be set to 'public'. - # - #auto_join_rooms: - # - "#example:example.com" - - # Where auto_join_rooms are specified, setting this flag ensures that the - # the rooms exist by creating them when the first user on the - # homeserver registers. - # - # By default the auto-created rooms are publicly joinable from any federated - # server. 
Use the autocreate_auto_join_rooms_federated and - # autocreate_auto_join_room_preset settings below to customise this behaviour. - # - # Setting to false means that if the rooms are not manually created, - # users cannot be auto-joined since they do not exist. - # - # Defaults to true. Uncomment the following line to disable automatically - # creating auto-join rooms. - # - #autocreate_auto_join_rooms: false - - # Whether the auto_join_rooms that are auto-created are available via - # federation. Only has an effect if autocreate_auto_join_rooms is true. - # - # Note that whether a room is federated cannot be modified after - # creation. - # - # Defaults to true: the room will be joinable from other servers. - # Uncomment the following to prevent users from other homeservers from - # joining these rooms. - # - #autocreate_auto_join_rooms_federated: false - - # The room preset to use when auto-creating one of auto_join_rooms. Only has an - # effect if autocreate_auto_join_rooms is true. - # - # This can be one of "public_chat", "private_chat", or "trusted_private_chat". - # If a value of "private_chat" or "trusted_private_chat" is used then - # auto_join_mxid_localpart must also be configured. - # - # Defaults to "public_chat", meaning that the room is joinable by anyone, including - # federated servers if autocreate_auto_join_rooms_federated is true (the default). - # Uncomment the following to require an invitation to join these rooms. - # - #autocreate_auto_join_room_preset: private_chat - - # The local part of the user id which is used to create auto_join_rooms if - # autocreate_auto_join_rooms is true. If this is not provided then the - # initial user account that registers will be used to create the rooms. - # - # The user id is also used to invite new users to any auto-join rooms which - # are set to invite-only. - # - # It *must* be configured if autocreate_auto_join_room_preset is set to - # "private_chat" or "trusted_private_chat". 
- # - # Note that this must be specified in order for new users to be correctly - # invited to any auto-join rooms which have been set to invite-only (either - # at the time of creation or subsequently). - # - # Note that, if the room already exists, this user must be joined and - # have the appropriate permissions to invite new members. - # - #auto_join_mxid_localpart: system - - # When auto_join_rooms is specified, setting this flag to false prevents - # guest accounts from being automatically joined to the rooms. - # - # Defaults to true. - # - #auto_join_rooms_for_guests: false - - # Whether to inhibit errors raised when registering a new account if the user ID - # already exists. If turned on, that requests to /register/available will always - # show a user ID as available, and Synapse won't raise an error when starting - # a registration with a user ID that already exists. However, Synapse will still - # raise an error if the registration completes and the username conflicts. - # - # Defaults to false. 
- # - #inhibit_user_in_use_error: true - """ - % locals() - ) + return "" @staticmethod def add_arguments(parser: argparse.ArgumentParser) -> None: diff --git a/synapse/config/repository.py b/synapse/config/repository.py index f9c55143c3..aadec1e54e 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -19,7 +19,7 @@ from urllib.request import getproxies_environment # type: ignore import attr -from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set +from synapse.config.server import generate_ip_set from synapse.types import JsonDict from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module @@ -242,166 +242,4 @@ class ContentRepositoryConfig(Config): def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: assert data_dir_path is not None media_store = os.path.join(data_dir_path, "media_store") - - formatted_thumbnail_sizes = "".join( - THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES - ) - # strip final NL - formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1] - - ip_range_blacklist = "\n".join( - " # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST - ) - - return ( - r""" - ## Media Store ## - - # Enable the media store service in the Synapse master. Uncomment the - # following if you are using a separate media store worker. - # - #enable_media_repo: false - - # Directory where uploaded images and attachments are stored. - # - media_store_path: "%(media_store)s" - - # Media storage providers allow media to be stored in different - # locations. 
- # - #media_storage_providers: - # - module: file_system - # # Whether to store newly uploaded local files - # store_local: false - # # Whether to store newly downloaded remote files - # store_remote: false - # # Whether to wait for successful storage for local uploads - # store_synchronous: false - # config: - # directory: /mnt/some/other/directory - - # The largest allowed upload size in bytes - # - # If you are using a reverse proxy you may also need to set this value in - # your reverse proxy's config. Notably Nginx has a small max body size by default. - # See https://matrix-org.github.io/synapse/latest/reverse_proxy.html. - # - #max_upload_size: 50M - - # Maximum number of pixels that will be thumbnailed - # - #max_image_pixels: 32M - - # Whether to generate new thumbnails on the fly to precisely match - # the resolution requested by the client. If true then whenever - # a new resolution is requested by the client the server will - # generate a new thumbnail. If false the server will pick a thumbnail - # from a precalculated list. - # - #dynamic_thumbnails: false - - # List of thumbnails to precalculate when an image is uploaded. - # - #thumbnail_sizes: -%(formatted_thumbnail_sizes)s - - # Is the preview URL API enabled? - # - # 'false' by default: uncomment the following to enable it (and specify a - # url_preview_ip_range_blacklist blacklist). - # - #url_preview_enabled: true - - # List of IP address CIDR ranges that the URL preview spider is denied - # from accessing. There are no defaults: you must explicitly - # specify a list for URL previewing to work. You should specify any - # internal services in your network that you do not want synapse to try - # to connect to, otherwise anyone in any Matrix room could cause your - # synapse to issue arbitrary GET requests to your internal services, - # causing serious security issues. 
- # - # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly - # listed here, since they correspond to unroutable addresses.) - # - # This must be specified if url_preview_enabled is set. It is recommended that - # you uncomment the following list as a starting point. - # - # Note: The value is ignored when an HTTP proxy is in use - # - #url_preview_ip_range_blacklist: -%(ip_range_blacklist)s - - # List of IP address CIDR ranges that the URL preview spider is allowed - # to access even if they are specified in url_preview_ip_range_blacklist. - # This is useful for specifying exceptions to wide-ranging blacklisted - # target IP ranges - e.g. for enabling URL previews for a specific private - # website only visible in your network. - # - #url_preview_ip_range_whitelist: - # - '192.168.1.1' - - # Optional list of URL matches that the URL preview spider is - # denied from accessing. You should use url_preview_ip_range_blacklist - # in preference to this, otherwise someone could define a public DNS - # entry that points to a private IP address and circumvent the blacklist. - # This is more useful if you know there is an entire shape of URL that - # you know that will never want synapse to try to spider. - # - # Each list entry is a dictionary of url component attributes as returned - # by urlparse.urlsplit as applied to the absolute form of the URL. See - # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit - # The values of the dictionary are treated as an filename match pattern - # applied to that component of URLs, unless they start with a ^ in which - # case they are treated as a regular expression match. If all the - # specified component matches for a given list item succeed, the URL is - # blacklisted. 
- # - #url_preview_url_blacklist: - # # blacklist any URL with a username in its URI - # - username: '*' - # - # # blacklist all *.google.com URLs - # - netloc: 'google.com' - # - netloc: '*.google.com' - # - # # blacklist all plain HTTP URLs - # - scheme: 'http' - # - # # blacklist http(s)://www.acme.com/foo - # - netloc: 'www.acme.com' - # path: '/foo' - # - # # blacklist any URL with a literal IPv4 address - # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' - - # The largest allowed URL preview spidering size in bytes - # - #max_spider_size: 10M - - # A list of values for the Accept-Language HTTP header used when - # downloading webpages during URL preview generation. This allows - # Synapse to specify the preferred languages that URL previews should - # be in when communicating with remote servers. - # - # Each value is a IETF language tag; a 2-3 letter identifier for a - # language, optionally followed by subtags separated by '-', specifying - # a country or region variant. - # - # Multiple values can be provided, and a weight can be added to each by - # using quality value syntax (;q=). '*' translates to any language. - # - # Defaults to "en". - # - # Example: - # - # url_preview_accept_language: - # - en-UK - # - en-US;q=0.9 - # - fr;q=0.8 - # - *;q=0.7 - # - url_preview_accept_language: - # - en - """ - % locals() - ) + return f"media_store_path: {media_store}" diff --git a/synapse/config/retention.py b/synapse/config/retention.py index 03b723b84b..033051a9c2 100644 --- a/synapse/config/retention.py +++ b/synapse/config/retention.py @@ -153,75 +153,3 @@ class RetentionConfig(Config): self.retention_purge_jobs = [ RetentionPurgeJob(self.parse_duration("1d"), None, None) ] - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Message retention policy at the server level. 
- # - # Room admins and mods can define a retention period for their rooms using the - # 'm.room.retention' state event, and server admins can cap this period by setting - # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. - # - # If this feature is enabled, Synapse will regularly look for and purge events - # which are older than the room's maximum retention period. Synapse will also - # filter events received over federation so that events that should have been - # purged are ignored and not stored again. - # - retention: - # The message retention policies feature is disabled by default. Uncomment the - # following line to enable it. - # - #enabled: true - - # Default retention policy. If set, Synapse will apply it to rooms that lack the - # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't - # matter much because Synapse doesn't take it into account yet. - # - #default_policy: - # min_lifetime: 1d - # max_lifetime: 1y - - # Retention policy limits. If set, and the state of a room contains a - # 'm.room.retention' event in its state which contains a 'min_lifetime' or a - # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy - # to these limits when running purge jobs. - # - #allowed_lifetime_min: 1d - #allowed_lifetime_max: 1y - - # Server admins can define the settings of the background jobs purging the - # events which lifetime has expired under the 'purge_jobs' section. - # - # If no configuration is provided, a single job will be set up to delete expired - # events in every room daily. - # - # Each job's configuration defines which range of message lifetimes the job - # takes care of. For example, if 'shortest_max_lifetime' is '2d' and - # 'longest_max_lifetime' is '3d', the job will handle purging expired events in - # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and - # lower than or equal to 3 days. 
Both the minimum and the maximum value of a - # range are optional, e.g. a job with no 'shortest_max_lifetime' and a - # 'longest_max_lifetime' of '3d' will handle every room with a retention policy - # which 'max_lifetime' is lower than or equal to three days. - # - # The rationale for this per-job configuration is that some rooms might have a - # retention policy with a low 'max_lifetime', where history needs to be purged - # of outdated messages on a more frequent basis than for the rest of the rooms - # (e.g. every 12h), but not want that purge to be performed by a job that's - # iterating over every room it knows, which could be heavy on the server. - # - # If any purge job is configured, it is strongly recommended to have at least - # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' - # set, or one job without 'shortest_max_lifetime' and one job without - # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if - # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a - # room's policy to these values is done after the policies are retrieved from - # Synapse's database (which is done using the range specified in a purge job's - # configuration). - # - #purge_jobs: - # - longest_max_lifetime: 3d - # interval: 12h - # - shortest_max_lifetime: 3d - # interval: 1d - """ diff --git a/synapse/config/room.py b/synapse/config/room.py index 462d85ac1d..4a7ac00540 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -75,59 +75,3 @@ class RoomConfig(Config): % preset ) # We validate the actual overrides when we try to apply them. - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Rooms ## - - # Controls whether locally-created rooms should be end-to-end encrypted by - # default. - # - # Possible options are "all", "invite", and "off". 
They are defined as: - # - # * "all": any locally-created room - # * "invite": any room created with the "private_chat" or "trusted_private_chat" - # room creation presets - # * "off": this option will take no effect - # - # The default value is "off". - # - # Note that this option will only affect rooms created after it is set. It - # will also not affect rooms created by other servers. - # - #encryption_enabled_by_default_for_room_type: invite - - # Override the default power levels for rooms created on this server, per - # room creation preset. - # - # The appropriate dictionary for the room preset will be applied on top - # of the existing power levels content. - # - # Useful if you know that your users need special permissions in rooms - # that they create (e.g. to send particular types of state events without - # needing an elevated power level). This takes the same shape as the - # `power_level_content_override` parameter in the /createRoom API, but - # is applied before that parameter. - # - # Valid keys are some or all of `private_chat`, `trusted_private_chat` - # and `public_chat`. Inside each of those should be any of the - # properties allowed in `power_level_content_override` in the - # /createRoom API. If any property is missing, its default value will - # continue to be used. If any property is present, it will overwrite - # the existing default completely (so if the `events` property exists, - # the default event power levels will be ignored). 
- # - #default_power_level_content_override: - # private_chat: - # "events": - # "com.example.myeventtype" : 0 - # "m.room.avatar": 50 - # "m.room.canonical_alias": 50 - # "m.room.encryption": 100 - # "m.room.history_visibility": 100 - # "m.room.name": 50 - # "m.room.power_levels": 100 - # "m.room.server_acl": 100 - # "m.room.tombstone": 100 - # "events_default": 1 - """ diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 717ba70e1c..3ed236217f 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -52,72 +52,6 @@ class RoomDirectoryConfig(Config): _RoomDirectoryRule("room_list_publication_rules", {"action": "allow"}) ] - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # Uncomment to disable searching the public room list. When disabled - # blocks searching local and remote room lists for local and remote - # users by always returning an empty list for all queries. - # - #enable_room_list_search: false - - # The `alias_creation` option controls who's allowed to create aliases - # on this server. - # - # The format of this option is a list of rules that contain globs that - # match against user_id, room_id and the new alias (fully qualified with - # server name). The action in the first rule that matches is taken, - # which can currently either be "allow" or "deny". - # - # Missing user_id/room_id/alias fields default to "*". - # - # If no rules match the request is denied. An empty list means no one - # can create aliases. 
- # - # Options for the rules include: - # - # user_id: Matches against the creator of the alias - # alias: Matches against the alias being created - # room_id: Matches against the room ID the alias is being pointed at - # action: Whether to "allow" or "deny" the request if the rule matches - # - # The default is: - # - #alias_creation_rules: - # - user_id: "*" - # alias: "*" - # room_id: "*" - # action: allow - - # The `room_list_publication_rules` option controls who can publish and - # which rooms can be published in the public room list. - # - # The format of this option is the same as that for - # `alias_creation_rules`. - # - # If the room has one or more aliases associated with it, only one of - # the aliases needs to match the alias rule. If there are no aliases - # then only rules with `alias: *` match. - # - # If no rules match the request is denied. An empty list means no one - # can publish rooms. - # - # Options for the rules include: - # - # user_id: Matches against the creator of the alias - # room_id: Matches against the room ID being published - # alias: Matches against any current local or canonical aliases - # associated with the room - # action: Whether to "allow" or "deny" the request if the rule matches - # - # The default is: - # - #room_list_publication_rules: - # - user_id: "*" - # alias: "*" - # room_id: "*" - # action: allow - """ - def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> bool: """Checks if the given user is allowed to create the given alias diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 19b2f1b215..bd7c234d31 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -223,189 +223,6 @@ class SAML2Config(Config): }, } - def generate_config_section(self, config_dir_path: str, **kwargs: Any) -> str: - return """\ - ## Single sign-on integration ## - - # The following settings can be used to make Synapse use a single sign-on - # provider for authentication, instead of 
its internal password database. - # - # You will probably also want to set the following options to `false` to - # disable the regular login/registration flows: - # * enable_registration - # * password_config.enabled - # - # You will also want to investigate the settings under the "sso" configuration - # section below. - - # Enable SAML2 for registration and login. Uses pysaml2. - # - # At least one of `sp_config` or `config_path` must be set in this section to - # enable SAML login. - # - # Once SAML support is enabled, a metadata file will be exposed at - # https://<server>:<port>/_synapse/client/saml2/metadata.xml, which you may be able to - # use to configure your SAML IdP with. Alternatively, you can manually configure - # the IdP to use an ACS location of - # https://<server>:<port>/_synapse/client/saml2/authn_response. - # - saml2_config: - # `sp_config` is the configuration for the pysaml2 Service Provider. - # See pysaml2 docs for format of config. - # - # Default values will be used for the 'entityid' and 'service' settings, - # so it is not normally necessary to specify them unless you need to - # override them. - # - sp_config: - # Point this to the IdP's metadata. You must provide either a local - # file via the `local` attribute or (preferably) a URL via the - # `remote` attribute. - # - #metadata: - # local: ["saml2/idp.xml"] - # remote: - # - url: https://our_idp/metadata.xml - - # Allowed clock difference in seconds between the homeserver and IdP. - # - # Uncomment the below to increase the accepted time difference from 0 to 3 seconds. - # - #accepted_time_diff: 3 - - # By default, the user has to go to our login page first. If you'd like - # to allow IdP-initiated login, set 'allow_unsolicited: true' in a - # 'service.sp' section: - # - #service: - # sp: - # allow_unsolicited: true - - # The examples below are just used to generate our metadata xml, and you - # may well not need them, depending on your setup. 
Alternatively you - # may need a whole lot more detail - see the pysaml2 docs! - - #description: ["My awesome SP", "en"] - #name: ["Test SP", "en"] - - #ui_info: - # display_name: - # - lang: en - # text: "Display Name is the descriptive name of your service." - # description: - # - lang: en - # text: "Description should be a short paragraph explaining the purpose of the service." - # information_url: - # - lang: en - # text: "https://example.com/terms-of-service" - # privacy_statement_url: - # - lang: en - # text: "https://example.com/privacy-policy" - # keywords: - # - lang: en - # text: ["Matrix", "Element"] - # logo: - # - lang: en - # text: "https://example.com/logo.svg" - # width: "200" - # height: "80" - - #organization: - # name: Example com - # display_name: - # - ["Example co", "en"] - # url: "http://example.com" - - #contact_person: - # - given_name: Bob - # sur_name: "the Sysadmin" - # email_address": ["admin@example.com"] - # contact_type": technical - - # Instead of putting the config inline as above, you can specify a - # separate pysaml2 configuration file: - # - #config_path: "%(config_dir_path)s/sp_conf.py" - - # The lifetime of a SAML session. This defines how long a user has to - # complete the authentication process, if allow_unsolicited is unset. - # The default is 15 minutes. - # - #saml_session_lifetime: 5m - - # An external module can be provided here as a custom solution to - # mapping attributes returned from a saml provider onto a matrix user. - # - user_mapping_provider: - # The custom module's class. Uncomment to use a custom module. - # - #module: mapping_provider.SamlMappingProvider - - # Custom configuration values for the module. Below options are - # intended for the built-in provider, they should be changed if - # using a custom module. This section will be passed as a Python - # dictionary to the module's `parse_config` method. 
- # - config: - # The SAML attribute (after mapping via the attribute maps) to use - # to derive the Matrix ID from. 'uid' by default. - # - # Note: This used to be configured by the - # saml2_config.mxid_source_attribute option. If that is still - # defined, its value will be used instead. - # - #mxid_source_attribute: displayName - - # The mapping system to use for mapping the saml attribute onto a - # matrix ID. - # - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with - # '.'). - # The default is 'hexencode'. - # - # Note: This used to be configured by the - # saml2_config.mxid_mapping option. If that is still defined, its - # value will be used instead. - # - #mxid_mapping: dotreplace - - # In previous versions of synapse, the mapping from SAML attribute to - # MXID was always calculated dynamically rather than stored in a - # table. For backwards- compatibility, we will look for user_ids - # matching such a pattern before creating a new account. - # - # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if - # the attribute maps are changed, it may be necessary to change it. - # - # The default is 'uid'. - # - #grandfathered_mxid_source_attribute: upn - - # It is possible to configure Synapse to only allow logins if SAML attributes - # match particular values. The requirements can be listed under - # `attribute_requirements` as shown below. All of the listed attributes must - # match for the login to be permitted. - # - #attribute_requirements: - # - attribute: userGroup - # value: "staff" - # - attribute: department - # value: "sales" - - # If the metadata XML contains multiple IdP entities then the `idp_entityid` - # option must be set to the entity to redirect users to. - # - # Most deployments only have a single IdP entity and so should omit this - # option. 
- # - #idp_entityid: 'https://our_idp/entityid' - """ % { - "config_dir_path": config_dir_path - } - ATTRIBUTE_REQUIREMENTS_SCHEMA = { "type": "array", diff --git a/synapse/config/server.py b/synapse/config/server.py index 657322cb1f..828938e5ec 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -16,7 +16,6 @@ import argparse import itertools import logging import os.path -import re import urllib.parse from textwrap import indent from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union @@ -702,9 +701,6 @@ class ServerConfig(Config): listeners: Optional[List[dict]], **kwargs: Any, ) -> str: - ip_range_blacklist = "\n".join( - " # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST - ) _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: @@ -715,9 +711,6 @@ class ServerConfig(Config): pid_file = os.path.join(data_dir_path, "homeserver.pid") - # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the - # default config string - default_room_version = DEFAULT_ROOM_VERSION secure_listeners = [] unsecure_listeners = [] private_addresses = ["::1", "127.0.0.1"] @@ -765,501 +758,18 @@ class ServerConfig(Config): compress: false""" if listeners: - # comment out this block - unsecure_http_bindings = "#" + re.sub( - "\n {10}", - lambda match: match.group(0) + "#", - unsecure_http_bindings, - ) + unsecure_http_bindings = "" if not secure_listeners: - secure_http_bindings = ( - """#- port: %(bind_port)s - # type: http - # tls: true - # resources: - # - names: [client, federation]""" - % locals() - ) + secure_http_bindings = "" return ( """\ - ## Server ## - - # The public-facing domain of the server - # - # The server_name name will appear at the end of usernames and room addresses - # created on this server. 
For example if the server_name was example.com, - # usernames on this server would be in the format @user:example.com - # - # In most cases you should avoid using a matrix specific subdomain such as - # matrix.example.com or synapse.example.com as the server_name for the same - # reasons you wouldn't use user@email.example.com as your email address. - # See https://matrix-org.github.io/synapse/latest/delegate.html - # for information on how to host Synapse on a subdomain while preserving - # a clean server_name. - # - # The server_name cannot be changed later so it is important to - # configure this correctly before you start Synapse. It should be all - # lowercase and may contain an explicit port. - # Examples: matrix.org, localhost:8080 - # server_name: "%(server_name)s" - - # When running as a daemon, the file to store the pid in - # pid_file: %(pid_file)s - - # The absolute URL to the web client which / will redirect to. - # - #web_client_location: https://riot.example.com/ - - # The public-facing base URL that clients use to access this Homeserver (not - # including _matrix/...). This is the same URL a user might enter into the - # 'Custom Homeserver URL' field on their client. If you use Synapse with a - # reverse proxy, this should be the URL to reach Synapse via the proxy. - # Otherwise, it should be the URL to reach Synapse's client HTTP listener (see - # 'listeners' below). - # - # Defaults to 'https://<server_name>/'. - # - #public_baseurl: https://example.com/ - - # Uncomment the following to tell other servers to send federation traffic on - # port 443. - # - # By default, other servers will try to reach our server on port 8448, which can - # be inconvenient in some environments. - # - # Provided 'https://<server_name>/' on port 443 is routed to Synapse, this - # option configures Synapse to serve a file at - # 'https://<server_name>/.well-known/matrix/server'. This will tell other - # servers to send traffic to port 443 instead. 
- # - # See https://matrix-org.github.io/synapse/latest/delegate.html for more - # information. - # - # Defaults to 'false'. - # - #serve_server_wellknown: true - - # Set the soft limit on the number of file descriptors synapse can use - # Zero is used to indicate synapse should set the soft limit to the - # hard limit. - # - #soft_file_limit: 0 - - # Presence tracking allows users to see the state (e.g online/offline) - # of other local and remote users. - # - presence: - # Uncomment to disable presence tracking on this homeserver. This option - # replaces the previous top-level 'use_presence' option. - # - #enabled: false - - # Whether to require authentication to retrieve profile data (avatars, - # display names) of other users through the client API. Defaults to - # 'false'. Note that profile data is also available via the federation - # API, unless allow_profile_lookup_over_federation is set to false. - # - #require_auth_for_profile_requests: true - - # Uncomment to require a user to share a room with another user in order - # to retrieve their profile information. Only checked on Client-Server - # requests. Profile requests from other servers should be checked by the - # requesting server. Defaults to 'false'. - # - #limit_profile_requests_to_users_who_share_rooms: true - - # Uncomment to prevent a user's profile data from being retrieved and - # displayed in a room until they have joined it. By default, a user's - # profile data is included in an invite event, regardless of the values - # of the above two settings, and whether or not the users share a server. - # Defaults to 'true'. - # - #include_profile_data_on_invite: false - - # If set to 'true', removes the need for authentication to access the server's - # public rooms directory through the client API, meaning that anyone can - # query the room directory. Defaults to 'false'. 
- # - #allow_public_rooms_without_auth: true - - # If set to 'true', allows any other homeserver to fetch the server's public - # rooms directory via federation. Defaults to 'false'. - # - #allow_public_rooms_over_federation: true - - # The default room version for newly created rooms. - # - # Known room versions are listed here: - # https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions - # - # For example, for room version 1, default_room_version should be set - # to "1". - # - #default_room_version: "%(default_room_version)s" - - # The GC threshold parameters to pass to `gc.set_threshold`, if defined - # - #gc_thresholds: [700, 10, 10] - - # The minimum time in seconds between each GC for a generation, regardless of - # the GC thresholds. This ensures that we don't do GC too frequently. - # - # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive - # generation 0 GCs, etc. - # - # Defaults to `[1s, 10s, 30s]`. - # - #gc_min_interval: [0.5s, 30s, 1m] - - # Set the limit on the returned events in the timeline in the get - # and sync operations. The default value is 100. -1 means no upper limit. - # - # Uncomment the following to increase the limit to 5000. - # - #filter_timeline_limit: 5000 - - # Whether room invites to users on this server should be blocked - # (except those sent by local server admins). The default is False. - # - #block_non_admin_invites: true - - # Room searching - # - # If disabled, new messages will not be indexed for searching and users - # will receive errors when searching for messages. Defaults to enabled. - # - #enable_search: false - - # Prevent outgoing requests from being sent to the following blacklisted IP address - # CIDR ranges. If this option is not specified then it defaults to private IP - # address ranges (see the example below). 
- # - # The blacklist applies to the outbound requests for federation, identity servers, - # push servers, and for checking key validity for third-party invite events. - # - # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly - # listed here, since they correspond to unroutable addresses.) - # - # This option replaces federation_ip_range_blacklist in Synapse v1.25.0. - # - # Note: The value is ignored when an HTTP proxy is in use - # - #ip_range_blacklist: -%(ip_range_blacklist)s - - # List of IP address CIDR ranges that should be allowed for federation, - # identity servers, push servers, and for checking key validity for - # third-party invite events. This is useful for specifying exceptions to - # wide-ranging blacklisted target IP ranges - e.g. for communication with - # a push server only visible in your network. - # - # This whitelist overrides ip_range_blacklist and defaults to an empty - # list. - # - #ip_range_whitelist: - # - '192.168.1.1' - - # List of ports that Synapse should listen on, their purpose and their - # configuration. - # - # Options for each listener include: - # - # port: the TCP port to bind to - # - # bind_addresses: a list of local addresses to listen on. The default is - # 'all local interfaces'. - # - # type: the type of listener. Normally 'http', but other valid options are: - # 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html), - # 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html), - # 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html). - # - # tls: set to true to enable TLS for this listener. Will use the TLS - # key/cert specified in tls_private_key_path / tls_certificate_path. - # - # x_forwarded: Only valid for an 'http' listener. Set to true to use the - # X-Forwarded-For header as the client IP. Useful when Synapse is - # behind a reverse-proxy. - # - # resources: Only valid for an 'http' listener. 
A list of resources to host - # on this port. Options for each resource are: - # - # names: a list of names of HTTP resources. See below for a list of - # valid resource names. - # - # compress: set to true to enable HTTP compression for this resource. - # - # additional_resources: Only valid for an 'http' listener. A map of - # additional endpoints which should be loaded via dynamic modules. - # - # Valid resource names are: - # - # client: the client-server API (/_matrix/client), and the synapse admin - # API (/_synapse/admin). Also implies 'media' and 'static'. - # - # consent: user consent forms (/_matrix/consent). - # See https://matrix-org.github.io/synapse/latest/consent_tracking.html. - # - # federation: the server-server API (/_matrix/federation). Also implies - # 'media', 'keys', 'openid' - # - # keys: the key discovery API (/_matrix/key). - # - # media: the media API (/_matrix/media). - # - # metrics: the metrics interface. - # See https://matrix-org.github.io/synapse/latest/metrics-howto.html. - # - # openid: OpenID authentication. - # - # replication: the HTTP replication API (/_synapse/replication). - # See https://matrix-org.github.io/synapse/latest/workers.html. - # - # static: static resources under synapse/static (/_matrix/static). (Mostly - # useful for 'fallback authentication'.) - # listeners: - # TLS-enabled listener: for when matrix traffic is sent directly to synapse. - # - # Disabled by default. To enable it, uncomment the following. (Note that you - # will also need to give Synapse a TLS key and certificate: see the TLS section - # below.) - # %(secure_http_bindings)s - - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy - # that unwraps TLS. - # - # If you plan to use a reverse proxy, please see - # https://matrix-org.github.io/synapse/latest/reverse_proxy.html. 
- # %(unsecure_http_bindings)s - - # example additional_resources: - # - #additional_resources: - # "/_matrix/my/custom/endpoint": - # module: my_module.CustomRequestHandler - # config: {} - - # Turn on the twisted ssh manhole service on localhost on the given - # port. - # - #- port: 9000 - # bind_addresses: ['::1', '127.0.0.1'] - # type: manhole - - # Connection settings for the manhole - # - manhole_settings: - # The username for the manhole. This defaults to 'matrix'. - # - #username: manhole - - # The password for the manhole. This defaults to 'rabbithole'. - # - #password: mypassword - - # The private and public SSH key pair used to encrypt the manhole traffic. - # If these are left unset, then hardcoded and non-secret keys are used, - # which could allow traffic to be intercepted if sent over a public network. - # - #ssh_priv_key_path: %(config_dir_path)s/id_rsa - #ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub - - # Forward extremities can build up in a room due to networking delays between - # homeservers. Once this happens in a large room, calculation of the state of - # that room can become quite expensive. To mitigate this, once the number of - # forward extremities reaches a given threshold, Synapse will send an - # org.matrix.dummy_event event, which will reduce the forward extremities - # in the room. - # - # This setting defines the threshold (i.e. number of forward extremities in the - # room) at which dummy events are sent. The default value is 10. - # - #dummy_events_threshold: 5 - - - ## Homeserver blocking ## - - # How to reach the server admin, used in ResourceLimitError - # - #admin_contact: 'mailto:admin@server.com' - - # Global blocking - # - #hs_disabled: false - #hs_disabled_message: 'Human readable reason for why the HS is blocked' - - # Monthly Active User Blocking - # - # Used in cases where the admin or server owner wants to limit to the - # number of monthly active users. 
- # - # 'limit_usage_by_mau' disables/enables monthly active user blocking. When - # enabled and a limit is reached the server returns a 'ResourceLimitError' - # with error type Codes.RESOURCE_LIMIT_EXCEEDED - # - # 'max_mau_value' is the hard limit of monthly active users above which - # the server will start blocking user actions. - # - # 'mau_trial_days' is a means to add a grace period for active users. It - # means that users must be active for this number of days before they - # can be considered active and guards against the case where lots of users - # sign up in a short space of time never to return after their initial - # session. - # - # The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but - # applies a different trial number if the user was registered by an appservice. - # A value of 0 means no trial days are applied. Appservices not listed in this - # dictionary use the value of `mau_trial_days` instead. - # - # 'mau_limit_alerting' is a means of limiting client side alerting - # should the mau limit be reached. This is useful for small instances - # where the admin has 5 mau seats (say) for 5 specific people and no - # interest increasing the mau limit further. Defaults to True, which - # means that alerting is enabled - # - #limit_usage_by_mau: false - #max_mau_value: 50 - #mau_trial_days: 2 - #mau_limit_alerting: false - #mau_appservice_trial_days: - # "appservice-id": 1 - - # If enabled, the metrics for the number of monthly active users will - # be populated, however no one will be limited. If limit_usage_by_mau - # is true, this is implied to be true. - # - #mau_stats_only: false - - # Sometimes the server admin will want to ensure certain accounts are - # never blocked by mau checking. These accounts are specified here. - # - #mau_limit_reserved_threepids: - # - medium: 'email' - # address: 'reserved_user@example.com' - - # Used by phonehome stats to group together related servers. 
- #server_context: context - - # Resource-constrained homeserver settings - # - # When this is enabled, the room "complexity" will be checked before a user - # joins a new remote room. If it is above the complexity limit, the server will - # disallow joining, or will instantly leave. - # - # Room complexity is an arbitrary measure based on factors such as the number of - # users in the room. - # - limit_remote_rooms: - # Uncomment to enable room complexity checking. - # - #enabled: true - - # the limit above which rooms cannot be joined. The default is 1.0. - # - #complexity: 0.5 - - # override the error which is returned when the room is too complex. - # - #complexity_error: "This room is too complex." - - # allow server admins to join complex rooms. Default is false. - # - #admins_can_join: true - - # Whether to require a user to be in the room to add an alias to it. - # Defaults to 'true'. - # - #require_membership_for_aliases: false - - # Whether to allow per-room membership profiles through the send of membership - # events with profile information that differ from the target's global profile. - # Defaults to 'true'. - # - #allow_per_room_profiles: false - - # The largest allowed file size for a user avatar. Defaults to no restriction. - # - # Note that user avatar changes will not work if this is set without - # using Synapse's media repository. - # - #max_avatar_size: 10M - - # The MIME types allowed for user avatars. Defaults to no restriction. - # - # Note that user avatar changes will not work if this is set without - # using Synapse's media repository. - # - #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] - - # How long to keep redacted events in unredacted form in the database. After - # this period redacted events get replaced with their redacted form in the DB. - # - # Defaults to `7d`. Set to `null` to disable. - # - #redaction_retention_period: 28d - - # How long to track users' last seen time and IPs in the database. 
- # - # Defaults to `28d`. Set to `null` to disable clearing out of old rows. - # - #user_ips_max_age: 14d - - # Inhibits the /requestToken endpoints from returning an error that might leak - # information about whether an e-mail address is in use or not on this - # homeserver. - # Note that for some endpoints the error situation is the e-mail already being - # used, and for others the error is entering the e-mail being unused. - # If this option is enabled, instead of returning an error, these endpoints will - # act as if no error happened and return a fake session ID ('sid') to clients. - # - #request_token_inhibit_3pid_errors: true - - # A list of domains that the domain portion of 'next_link' parameters - # must match. - # - # This parameter is optionally provided by clients while requesting - # validation of an email or phone number, and maps to a link that - # users will be automatically redirected to after validation - # succeeds. Clients can make use this parameter to aid the validation - # process. - # - # The whitelist is applied whether the homeserver or an - # identity server is handling validation. - # - # The default value is no whitelist functionality; all domains are - # allowed. Setting this value to an empty list will instead disallow - # all domains. - # - #next_link_domain_whitelist: ["matrix.org"] - - # Templates to use when generating email or HTML page contents. - # - templates: - # Directory in which Synapse will try to find template files to use to generate - # email or HTML page contents. - # If not set, or a file is not found within the template directory, a default - # template from within the Synapse package will be used. - # - # See https://matrix-org.github.io/synapse/latest/templates.html for more - # information about using custom templates. - # - #custom_template_directory: /path/to/custom/templates/ - - # List of rooms to exclude from sync responses. 
This is useful for server - # administrators wishing to group users into a room without these users being able - # to see it from their client. - # - # By default, no room is excluded. - # - #exclude_rooms_from_sync: - # - !foo:example.com """ % locals() ) diff --git a/synapse/config/server_notices.py b/synapse/config/server_notices.py index 505b4f6c6c..ce041abe9b 100644 --- a/synapse/config/server_notices.py +++ b/synapse/config/server_notices.py @@ -18,27 +18,6 @@ from synapse.types import JsonDict, UserID from ._base import Config -DEFAULT_CONFIG = """\ -# Server Notices room configuration -# -# Uncomment this section to enable a room which can be used to send notices -# from the server to users. It is a special room which cannot be left; notices -# come from a special "notices" user id. -# -# If you uncomment this section, you *must* define the system_mxid_localpart -# setting, which defines the id of the user which will be used to send the -# notices. -# -# It's also possible to override the room name, the display name of the -# "notices" user, and the avatar for the user. -# -#server_notices: -# system_mxid_localpart: notices -# system_mxid_display_name: "Server Notices" -# system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ" -# room_name: "Server Notices" -""" - class ServerNoticesConfig(Config): """Configuration for the server notices room. 
@@ -83,6 +62,3 @@ class ServerNoticesConfig(Config): self.server_notices_mxid_avatar_url = c.get("system_mxid_avatar_url", None) # todo: i18n self.server_notices_room_name = c.get("room_name", "Server Notices") - - def generate_config_section(self, **kwargs: Any) -> str: - return DEFAULT_CONFIG diff --git a/synapse/config/sso.py b/synapse/config/sso.py index f88eba77d0..2178cbf983 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -107,43 +107,3 @@ class SSOConfig(Config): self.root.server.public_baseurl + "_matrix/static/client/login" ) self.sso_client_whitelist.append(login_fallback_url) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Additional settings to use with single-sign on systems such as OpenID Connect, - # SAML2 and CAS. - # - # Server admins can configure custom templates for pages related to SSO. See - # https://matrix-org.github.io/synapse/latest/templates.html for more information. - # - sso: - # A list of client URLs which are whitelisted so that the user does not - # have to confirm giving access to their account to the URL. Any client - # whose URL starts with an entry in the following list will not be subject - # to an additional confirmation step after the SSO login is completed. - # - # WARNING: An entry such as "https://my.client" is insecure, because it - # will also match "https://my.client.evil.site", exposing your users to - # phishing attacks from evil.site. To avoid this, include a slash after the - # hostname: "https://my.client/". - # - # The login fallback page (used by clients that don't natively support the - # required login flows) is whitelisted in addition to any URLs in this list. - # - # By default, this list contains only the login fallback page. - # - #client_whitelist: - # - https://riot.im/develop - # - https://my.custom.client/ - - # Uncomment to keep a user's profile fields in sync with information from - # the identity provider. 
Currently only syncing the displayname is - # supported. Fields are checked on every SSO login, and are updated - # if necessary. - # - # Note that enabling this option will override user profile information, - # regardless of whether users have opted-out of syncing that - # information when first signing in. Defaults to false. - # - #update_profile_information: true - """ diff --git a/synapse/config/stats.py b/synapse/config/stats.py index ed1f416e4f..9621acd77f 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -46,16 +46,3 @@ class StatsConfig(Config): self.stats_enabled = stats_config.get("enabled", self.stats_enabled) if not self.stats_enabled: logger.warning(ROOM_STATS_DISABLED_WARN) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # Settings for local room and user statistics collection. See - # https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html. - # - stats: - # Uncomment the following to disable room and user statistics. Note that doing - # so may cause certain features (such as the room directory) not to work - # correctly. - # - #enabled: false - """ diff --git a/synapse/config/tls.py b/synapse/config/tls.py index cb17950d25..336fe3e0da 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -13,7 +13,6 @@ # limitations under the License. 
import logging -import os from typing import Any, List, Optional, Pattern from matrix_common.regex import glob_to_regex @@ -143,9 +142,6 @@ class TlsConfig(Config): def generate_config_section( self, - config_dir_path: str, - data_dir_path: str, - server_name: str, tls_certificate_path: Optional[str], tls_private_key_path: Optional[str], **kwargs: Any, @@ -153,90 +149,18 @@ class TlsConfig(Config): """If the TLS paths are not specified the default will be certs in the config directory""" - base_key_name = os.path.join(config_dir_path, server_name) - if bool(tls_certificate_path) != bool(tls_private_key_path): raise ConfigError( "Please specify both a cert path and a key path or neither." ) - tls_enabled = "" if tls_certificate_path and tls_private_key_path else "#" - - if not tls_certificate_path: - tls_certificate_path = base_key_name + ".tls.crt" - if not tls_private_key_path: - tls_private_key_path = base_key_name + ".tls.key" - - # flake8 doesn't recognise that variables are used in the below string - _ = tls_enabled - - return ( - """\ - ## TLS ## - - # PEM-encoded X509 certificate for TLS. - # This certificate, as of Synapse 1.0, will need to be a valid and verifiable - # certificate, signed by a recognised Certificate Authority. - # - # Be sure to use a `.pem` file that includes the full certificate chain including - # any intermediate certificates (for instance, if using certbot, use - # `fullchain.pem` as your certificate, not `cert.pem`). - # - %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s" - - # PEM-encoded private key for TLS - # - %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s" - - # Whether to verify TLS server certificates for outbound federation requests. - # - # Defaults to `true`. To disable certificate verification, uncomment the - # following line. - # - #federation_verify_certificates: false - - # The minimum TLS version that will be used for outbound federation requests. - # - # Defaults to `1`. 
Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note - # that setting this value higher than `1.2` will prevent federation to most - # of the public Matrix network: only configure it to `1.3` if you have an - # entirely private federation setup and you can ensure TLS 1.3 support. - # - #federation_client_minimum_tls_version: 1.2 - - # Skip federation certificate verification on the following whitelist - # of domains. - # - # This setting should only be used in very specific cases, such as - # federation over Tor hidden services and similar. For private networks - # of homeservers, you likely want to use a private CA instead. - # - # Only effective if federation_verify_certicates is `true`. - # - #federation_certificate_verification_whitelist: - # - lon.example.com - # - "*.domain.com" - # - "*.onion" - - # List of custom certificate authorities for federation traffic. - # - # This setting should only normally be used within a private network of - # homeservers. - # - # Note that this list will replace those that are provided by your - # operating environment. Certificates must be in PEM format. 
- # - #federation_custom_ca_list: - # - myCA1.pem - # - myCA2.pem - # - myCA3.pem - """ - # Lowercase the string representation of boolean values - % { - x[0]: str(x[1]).lower() if isinstance(x[1], bool) else x[1] - for x in locals().items() - } - ) + if tls_certificate_path and tls_private_key_path: + return f"""\ + tls_certificate_path: {tls_certificate_path} + tls_private_key_path: {tls_private_key_path} + """ + else: + return "" def read_tls_certificate(self) -> crypto.X509: """Reads the TLS certificate from the configured file, and returns it diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index ae68a3dd1a..6fbf927f11 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -67,53 +67,3 @@ class TracerConfig(Config): ("opentracing", "force_tracing_for_users", f"index {i}"), ) self.force_tracing_for_users.add(u) - - def generate_config_section(cls, **kwargs: Any) -> str: - return """\ - ## Opentracing ## - - # These settings enable opentracing, which implements distributed tracing. - # This allows you to observe the causal chains of events across servers - # including requests, key lookups etc., across any server running - # synapse or any other other services which supports opentracing - # (specifically those implemented with Jaeger). - # - opentracing: - # tracing is disabled by default. Uncomment the following line to enable it. - # - #enabled: true - - # The list of homeservers we wish to send and receive span contexts and span baggage. - # See https://matrix-org.github.io/synapse/latest/opentracing.html. - # - # This is a list of regexes which are matched against the server_name of the - # homeserver. - # - # By default, it is empty, so no servers are matched. - # - #homeserver_whitelist: - # - ".*" - - # A list of the matrix IDs of users whose requests will always be traced, - # even if the tracing system would otherwise drop the traces due to - # probabilistic sampling. - # - # By default, the list is empty. 
- # - #force_tracing_for_users: - # - "@user1:server_name" - # - "@user2:server_name" - - # Jaeger can be configured to sample traces at different rates. - # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration is mostly related to trace sampling which - # is documented here: - # https://www.jaegertracing.io/docs/latest/sampling/. - # - #jaeger_config: - # sampler: - # type: const - # param: 1 - # logging: - # false - """ diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 010e791924..c9e18b91e9 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -35,42 +35,3 @@ class UserDirectoryConfig(Config): self.user_directory_search_prefer_local_users = user_directory_config.get( "prefer_local_users", False ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # User Directory configuration - # - user_directory: - # Defines whether users can search the user directory. If false then - # empty responses are returned to all queries. Defaults to true. - # - # Uncomment to disable the user directory. - # - #enabled: false - - # Defines whether to search all users visible to your HS when searching - # the user directory. If false, search results will only contain users - # visible in public rooms and users sharing a room with the requester. - # Defaults to false. - # - # NB. If you set this to true, and the last time the user_directory search - # indexes were (re)built was before Synapse 1.44, you'll have to - # rebuild the indexes in order to search through all known users. - # These indexes are built the first time Synapse starts; admins can - # manually trigger a rebuild via API following the instructions at - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run - # - # Uncomment to return search results containing all known users, even if that - # user does not share a room with the requester. 
- # - #search_all_users: true - - # Defines whether to prefer local users in search query results. - # If True, local users are more likely to appear above remote users - # when searching the user directory. Defaults to false. - # - # Uncomment to prefer local over remote users in user directory search - # results. - # - #prefer_local_users: true - """ diff --git a/synapse/config/voip.py b/synapse/config/voip.py index 87c09abe24..43f0a0fa17 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py @@ -31,34 +31,3 @@ class VoipConfig(Config): config.get("turn_user_lifetime", "1h") ) self.turn_allow_guests = config.get("turn_allow_guests", True) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## TURN ## - - # The public URIs of the TURN server to give to clients - # - #turn_uris: [] - - # The shared secret used to compute passwords for the TURN server - # - #turn_shared_secret: "YOUR_SHARED_SECRET" - - # The Username and password if the TURN server needs them and - # does not use a token - # - #turn_username: "TURNSERVER_USERNAME" - #turn_password: "TURNSERVER_PASSWORD" - - # How long generated TURN credentials last - # - #turn_user_lifetime: 1h - - # Whether guests should be allowed to use the TURN server. - # This defaults to True, otherwise VoIP will be unreliable for guests. - # However, it does introduce a slight security risk as it allows users to - # connect to arbitrary endpoints without having first signed up for a - # valid account (e.g. by passing a CAPTCHA). - # - #turn_allow_guests: true - """ diff --git a/synapse/config/workers.py b/synapse/config/workers.py index e1569b3c14..f2716422b5 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -410,55 +410,6 @@ class WorkerConfig(Config): # (By this point, these are either the same value or only one is not None.) 
return bool(new_option_should_run_here or legacy_option_should_run_here) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Workers ## - - # Disables sending of outbound federation transactions on the main process. - # Uncomment if using a federation sender worker. - # - #send_federation: false - - # It is possible to run multiple federation sender workers, in which case the - # work is balanced across them. - # - # This configuration must be shared between all federation sender workers, and if - # changed all federation sender workers must be stopped at the same time and then - # started, to ensure that all instances are running with the same config (otherwise - # events may be dropped). - # - #federation_sender_instances: - # - federation_sender1 - - # When using workers this should be a map from `worker_name` to the - # HTTP replication listener of the worker, if configured. - # - #instance_map: - # worker1: - # host: localhost - # port: 8034 - - # Experimental: When using workers you can define which workers should - # handle event persistence and typing notifications. Any worker - # specified here must also be in the `instance_map`. - # - #stream_writers: - # events: worker1 - # typing: worker1 - - # The worker that is used to run background tasks (e.g. cleaning up expired - # data). If not provided this defaults to the main process. - # - #run_background_tasks_on: worker1 - - # A shared secret used by the replication APIs to authenticate HTTP requests - # from workers. - # - # By default this is unused and traffic is not authenticated. - # - #worker_replication_secret: "" - """ - def read_arguments(self, args: argparse.Namespace) -> None: # We support a bunch of command line arguments that override options in # the config. 
A lot of these options have a worker_* prefix when running diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4c0b587a76..e23503c1e0 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -45,9 +45,7 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) -def validate_event_for_room_version( - room_version_obj: RoomVersion, event: "EventBase" -) -> None: +def validate_event_for_room_version(event: "EventBase") -> None: """Ensure that the event complies with the limits, and has the right signatures NB: does not *validate* the signatures - it assumes that any signatures present @@ -60,12 +58,10 @@ def validate_event_for_room_version( NB: This is used to check events that have been received over federation. As such, it can only enforce the checks specified in the relevant room version, to avoid a split-brain situation where some servers accept such events, and others reject - them. - - TODO: consider moving this into EventValidator + them. See also EventValidator, which contains extra checks which are applied only to + locally-generated events. Args: - room_version_obj: the version of the room which contains this event event: the event to be checked Raises: @@ -103,7 +99,7 @@ def validate_event_for_room_version( raise AuthError(403, "Event not signed by sending server") is_invite_via_allow_rule = ( - room_version_obj.msc3083_join_rules + event.room_version.msc3083_join_rules and event.type == EventTypes.Member and event.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in event.content @@ -117,7 +113,6 @@ def validate_event_for_room_version( def check_auth_rules_for_event( - room_version_obj: RoomVersion, event: "EventBase", auth_events: Iterable["EventBase"], ) -> None: @@ -136,7 +131,6 @@ def check_auth_rules_for_event( a bunch of other tests. Args: - room_version_obj: the version of the room event: the event being checked. auth_events: the room state to check the events against. 
@@ -205,7 +199,10 @@ def check_auth_rules_for_event( raise AuthError(403, "This room has been marked as unfederatable.") # 4. If type is m.room.aliases - if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth: + if ( + event.type == EventTypes.Aliases + and event.room_version.special_case_aliases_auth + ): # 4a. If event has no state_key, reject if not event.is_state(): raise AuthError(403, "Alias event must be a state event") @@ -225,7 +222,7 @@ def check_auth_rules_for_event( # 5. If type is m.room.membership if event.type == EventTypes.Member: - _is_membership_change_allowed(room_version_obj, event, auth_dict) + _is_membership_change_allowed(event.room_version, event, auth_dict) logger.debug("Allowing! %s", event) return @@ -247,17 +244,17 @@ def check_auth_rules_for_event( _can_send_event(event, auth_dict) if event.type == EventTypes.PowerLevels: - _check_power_levels(room_version_obj, event, auth_dict) + _check_power_levels(event.room_version, event, auth_dict) if event.type == EventTypes.Redaction: - check_redaction(room_version_obj, event, auth_dict) + check_redaction(event.room_version, event, auth_dict) if ( event.type == EventTypes.MSC2716_INSERTION or event.type == EventTypes.MSC2716_BATCH or event.type == EventTypes.MSC2716_MARKER ): - check_historical(room_version_obj, event, auth_dict) + check_historical(event.room_version, event, auth_dict) logger.debug("Allowing! %s", event) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index d2e06c754e..32712d2042 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -28,7 +28,10 @@ from typing import ( Union, ) -from synapse.api.errors import Codes +# `Literal` appears with Python 3.8. 
+from typing_extensions import Literal + +import synapse from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour @@ -47,12 +50,12 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ Awaitable[ Union[ str, - Codes, + "synapse.api.errors.Codes", # Highly experimental, not officially part of the spamchecker API, may # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, Dict], + Tuple["synapse.api.errors.Codes", Dict], # Deprecated bool, ] @@ -62,12 +65,72 @@ SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[ ["synapse.events.EventBase"], Awaitable[Union[bool, str]], ] -USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] -USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]] -USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] -USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +USER_MAY_JOIN_ROOM_CALLBACK = Callable[ + [str, str, bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_INVITE_CALLBACK = Callable[ + [str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ + [str, str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_CALLBACK = Callable[ + [str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ + [str, RoomAlias], + 
Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ + [str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ @@ -88,7 +151,14 @@ CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ ] CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[ [ReadableFileWrapper, FileInfo], - Awaitable[bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], ] @@ -181,7 +251,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: class SpamChecker: - NOT_SPAM = "NOT_SPAM" + NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM" def __init__(self, hs: "synapse.server.HomeServer") -> None: self.hs = hs @@ -275,7 +345,7 @@ class SpamChecker: async def check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[Tuple[Codes, Dict], str]: + ) -> Union[Tuple["synapse.api.errors.Codes", Dict], str]: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -306,7 +376,7 @@ class SpamChecker: elif res is True: # This spam-checker rejects the event with deprecated # return value `True` - return Codes.FORBIDDEN + return (synapse.api.errors.Codes.FORBIDDEN, {}) elif not isinstance(res, str): # mypy complains that we can't reach this code because of the # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know @@ -352,7 +422,7 @@ class SpamChecker: async def user_may_join_room( self, user_id: str, room_id: str, is_invited: bool - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given users is allowed to join a room. Not called when a user creates a room. 
@@ -362,54 +432,70 @@ class SpamChecker: is_invited: Whether the user is invited into the room Returns: - Whether the user may join the room + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_join_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_join_room = await delay_cancellation( - callback(user_id, room_id, is_invited) - ) - if may_join_room is False: - return False + res = await delay_cancellation(callback(user_id, room_id, is_invited)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting join as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. + return self.NOT_SPAM async def user_may_invite( self, inviter_userid: str, invitee_userid: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may send an invite - If this method returns false, the invite will be rejected. - Args: inviter_userid: The user ID of the sender of the invitation invitee_userid: The user ID targeted in the invitation room_id: The room ID Returns: - True if the user may send an invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, invitee_userid, room_id) ) - if may_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. 
+ if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. + return self.NOT_SPAM async def user_may_send_3pid_invite( self, inviter_userid: str, medium: str, address: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may invite a given threepid into the room - If this method returns false, the threepid invite will be rejected. - Note that if the threepid is already associated with a Matrix user ID, Synapse will call user_may_invite with said user ID instead. @@ -420,88 +506,113 @@ class SpamChecker: room_id: The room ID Returns: - True if the user may send the invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_send_3pid_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_send_3pid_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, medium, address, room_id) ) - if may_send_3pid_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. 
+ if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting 3pid invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_create_room(self, userid: str) -> bool: + async def user_may_create_room( + self, userid: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room - If this method returns false, the creation request will be rejected. - Args: userid: The ID of the user attempting to create a room - - Returns: - True if the user may create a room, otherwise False """ for callback in self._user_may_create_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room = await delay_cancellation(callback(userid)) - if may_create_room is False: - return False + res = await delay_cancellation(callback(userid)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room creation as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room alias - If this method returns false, the association request will be rejected. 
- Args: userid: The ID of the user attempting to create a room alias room_alias: The alias to be created - Returns: - True if the user may create a room alias, otherwise False """ for callback in self._user_may_create_room_alias_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room_alias = await delay_cancellation( - callback(userid, room_alias) - ) - if may_create_room_alias is False: - return False + res = await delay_cancellation(callback(userid, room_alias)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room create as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_publish_room(self, userid: str, room_id: str) -> bool: + async def user_may_publish_room( + self, userid: str, room_id: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may publish a room to the directory - If this method returns false, the publish request will be rejected. 
- Args: userid: The user ID attempting to publish the room room_id: The ID of the room that would be published - - Returns: - True if the user may publish the room, otherwise False """ for callback in self._user_may_publish_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_publish_room = await delay_cancellation(callback(userid, room_id)) - if may_publish_room is False: - return False + res = await delay_cancellation(callback(userid, room_id)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room publication as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def check_username_for_spam(self, user_profile: UserProfile) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. @@ -567,7 +678,7 @@ class SpamChecker: async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a piece of newly uploaded media should be blocked. This will be called for local uploads, downloads of remote media, each @@ -580,31 +691,37 @@ class SpamChecker: async def check_media_file_for_spam( self, file: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union[Codes, Literal["NOT_SPAM"]]: buffer = BytesIO() await file.write_chunks_to(buffer.write) if buffer.getvalue() == b"Hello World": - return True + return synapse.module_api.NOT_SPAM - return False + return Codes.FORBIDDEN Args: file: An object that allows reading the contents of the media. file_info: Metadata about the file. - - Returns: - True if the media should be blocked or False if it should be - allowed. 
""" for callback in self._check_media_file_for_spam_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - spam = await delay_cancellation(callback(file_wrapper, file_info)) - if spam: - return True + res = await delay_cancellation(callback(file_wrapper, file_info)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is False or res is self.NOT_SPAM: + continue + elif res is True: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting media file as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return False + return self.NOT_SPAM diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 29fa9b3880..27c8beba25 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -35,6 +35,10 @@ class EventValidator: def validate_new(self, event: EventBase, config: HomeServerConfig) -> None: """Validates the event has roughly the right format + Suitable for checking a locally-created event. It has stricter checks than + is appropriate for an event received over federation (for which, see + event_auth.validate_event_for_room_version) + Args: event: The event to validate. config: The homeserver's configuration. 
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index ad475a913b..66e6305562 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1642,10 +1642,6 @@ def _validate_hierarchy_event(d: JsonDict) -> None: if not isinstance(event_type, str): raise ValueError("Invalid event: 'event_type' must be a str") - room_id = d.get("room_id") - if not isinstance(room_id, str): - raise ValueError("Invalid event: 'room_id' must be a str") - state_key = d.get("state_key") if not isinstance(state_key, str): raise ValueError("Invalid event: 'state_key' must be a str") diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 333ca9a97f..41d8b937af 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -37,6 +37,7 @@ from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ReadReceipt from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter +from synapse.visibility import filter_events_for_server if TYPE_CHECKING: import synapse.server @@ -77,6 +78,7 @@ class PerDestinationQueue: ): self._server_name = hs.hostname self._clock = hs.get_clock() + self._storage_controllers = hs.get_storage_controllers() self._store = hs.get_datastores().main self._transaction_manager = transaction_manager self._instance_name = hs.get_instance_name() @@ -442,6 +444,12 @@ class PerDestinationQueue: "This should not happen." % event_ids ) + logger.info( + "Catching up destination %s with %d PDUs", + self._destination, + len(catchup_pdus), + ) + # We send transactions with events from one room only, as its likely # that the remote will have to do additional processing, which may # take some time. 
It's better to give it small amounts of work @@ -487,19 +495,20 @@ class PerDestinationQueue: ): continue - # Filter out events where the server is not in the room, - # e.g. it may have left/been kicked. *Ideally* we'd pull - # out the kick and send that, but it's a rare edge case - # so we don't bother for now (the server that sent the - # kick should send it out if its online). - hosts = await self._state.get_hosts_in_room_at_events( - p.room_id, [p.event_id] - ) - if self._destination not in hosts: - continue - new_pdus.append(p) + # Filter out events where the server is not in the room, + # e.g. it may have left/been kicked. *Ideally* we'd pull + # out the kick and send that, but it's a rare edge case + # so we don't bother for now (the server that sent the + # kick should send it out if its online). + new_pdus = await filter_events_for_server( + self._storage_controllers, + self._destination, + new_pdus, + redact=False, + ) + # If we've filtered out all the extremities, fall back to # sending the original event. 
This should ensure that the # server gets at least some of missed events (especially if diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 7dfb890661..f7884bfbe0 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -24,7 +24,6 @@ from typing import ( Union, ) -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import Literal from synapse.api.constants import EduTypes @@ -42,6 +41,7 @@ from synapse.http.servlet import ( parse_strings_from_args, ) from synapse.types import JsonDict +from synapse.util import SYNAPSE_VERSION from synapse.util.ratelimitutils import FederationRateLimiter if TYPE_CHECKING: @@ -622,7 +622,7 @@ class FederationVersionServlet(BaseFederationServlet): { "server": { "name": "Synapse", - "version": get_distribution_version_string("matrix-synapse"), + "version": SYNAPSE_VERSION, } }, ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index fbafbbee6b..3d83236b0c 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -37,9 +37,7 @@ from typing import ( import attr import bcrypt -import pymacaroons import unpaddedbase64 -from pymacaroons.exceptions import MacaroonVerificationFailedException from twisted.internet.defer import CancelledError from twisted.web.server import Request @@ -69,7 +67,7 @@ from synapse.storage.roommember import ProfileInfo from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable -from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry +from synapse.util.macaroons import LoginTokenAttributes from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email @@ -81,6 +79,8 @@ if 
TYPE_CHECKING: logger = logging.getLogger(__name__) +INVALID_USERNAME_OR_PASSWORD = "Invalid username or password" + def convert_client_dict_legacy_fields_to_identifier( submission: JsonDict, @@ -178,25 +178,13 @@ class SsoLoginExtraAttributes: extra_attributes: JsonDict -@attr.s(slots=True, frozen=True, auto_attribs=True) -class LoginTokenAttributes: - """Data we store in a short-term login token""" - - user_id: str - - auth_provider_id: str - """The SSO Identity Provider that the user authenticated with, to get this token.""" - - auth_provider_session_id: Optional[str] - """The session ID advertised by the SSO Identity Provider.""" - - class AuthHandler: SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.checkers: Dict[str, UserInteractiveAuthChecker] = {} for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: @@ -983,7 +971,7 @@ class AuthHandler: not is_appservice_ghost or self.hs.config.appservice.track_appservice_user_ips ): - await self.auth.check_auth_blocking(user_id) + await self.auth_blocking.check_auth_blocking(user_id) access_token = self.generate_access_token(target_user_id_obj) await self.store.add_access_token_to_user( @@ -1215,7 +1203,9 @@ class AuthHandler: await self._failed_login_attempts_ratelimiter.can_do_action( None, (medium, address) ) - raise LoginError(403, "", errcode=Codes.FORBIDDEN) + raise LoginError( + 403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN + ) identifier_dict = {"type": "m.id.user", "user": user_id} @@ -1341,7 +1331,7 @@ class AuthHandler: # We raise a 403 here, but note that if we're doing user-interactive # login, it turns all LoginErrors into a 401 anyway. 
- raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN) + raise LoginError(403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN) async def check_password_provider_3pid( self, medium: str, address: str, password: str @@ -1435,7 +1425,7 @@ class AuthHandler: except Exception: raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN) - await self.auth.check_auth_blocking(res.user_id) + await self.auth_blocking.check_auth_blocking(res.user_id) return res async def delete_access_token(self, access_token: str) -> None: @@ -1826,98 +1816,6 @@ class AuthHandler: return urllib.parse.urlunparse(url_parts) -@attr.s(slots=True, auto_attribs=True) -class MacaroonGenerator: - hs: "HomeServer" - - def generate_guest_access_token(self, user_id: str) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = access") - # Include a nonce, to make sure that each login gets a different - # access token. - macaroon.add_first_party_caveat( - "nonce = %s" % (stringutils.random_string_with_symbols(16),) - ) - macaroon.add_first_party_caveat("guest = true") - return macaroon.serialize() - - def generate_short_term_login_token( - self, - user_id: str, - auth_provider_id: str, - auth_provider_session_id: Optional[str] = None, - duration_in_ms: int = (2 * 60 * 1000), - ) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = login") - now = self.hs.get_clock().time_msec() - expiry = now + duration_in_ms - macaroon.add_first_party_caveat("time < %d" % (expiry,)) - macaroon.add_first_party_caveat("auth_provider_id = %s" % (auth_provider_id,)) - if auth_provider_session_id is not None: - macaroon.add_first_party_caveat( - "auth_provider_session_id = %s" % (auth_provider_session_id,) - ) - return macaroon.serialize() - - def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes: - """Verify a short-term-login macaroon - - Checks that the given token is a 
valid, unexpired short-term-login token - minted by this server. - - Args: - token: the login token to verify - - Returns: - the user_id that this token is valid for - - Raises: - MacaroonVerificationFailedException if the verification failed - """ - macaroon = pymacaroons.Macaroon.deserialize(token) - user_id = get_value_from_macaroon(macaroon, "user_id") - auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id") - - auth_provider_session_id: Optional[str] = None - try: - auth_provider_session_id = get_value_from_macaroon( - macaroon, "auth_provider_session_id" - ) - except MacaroonVerificationFailedException: - pass - - v = pymacaroons.Verifier() - v.satisfy_exact("gen = 1") - v.satisfy_exact("type = login") - v.satisfy_general(lambda c: c.startswith("user_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = ")) - satisfy_expiry(v, self.hs.get_clock().time_msec) - v.verify(macaroon, self.hs.config.key.macaroon_secret_key) - - return LoginTokenAttributes( - user_id=user_id, - auth_provider_id=auth_provider_id, - auth_provider_session_id=auth_provider_session_id, - ) - - def generate_delete_pusher_token(self, user_id: str) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = delete_pusher") - return macaroon.serialize() - - def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon: - macaroon = pymacaroons.Macaroon( - location=self.hs.config.server.server_name, - identifier="key", - key=self.hs.config.key.macaroon_secret_key, - ) - macaroon.add_first_party_caveat("gen = 1") - macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) - return macaroon - - def load_legacy_password_auth_providers(hs: "HomeServer") -> None: module_api = hs.get_module_api() for module, config in hs.config.authproviders.password_providers: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 
a0cbeedc30..b79c551703 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -398,35 +398,6 @@ class DeviceHandler(DeviceWorkerHandler): await self.delete_devices(user_id, user_devices) @trace - async def delete_device(self, user_id: str, device_id: str) -> None: - """Delete the given device - - Args: - user_id: The user to delete the device from. - device_id: The device to delete. - """ - - try: - await self.store.delete_device(user_id, device_id) - except errors.StoreError as e: - if e.code == 404: - # no match - set_tag("error", True) - log_kv( - {"reason": "User doesn't have device id.", "device_id": device_id} - ) - else: - raise - - await self._auth_handler.delete_access_tokens_for_user( - user_id, device_id=device_id - ) - - await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id) - - await self.notify_device_update(user_id, [device_id]) - - @trace async def delete_all_devices_for_user( self, user_id: str, except_device_id: Optional[str] = None ) -> None: @@ -591,7 +562,7 @@ class DeviceHandler(DeviceWorkerHandler): user_id, device_id, device_data ) if old_device_id is not None: - await self.delete_device(user_id, old_device_id) + await self.delete_devices(user_id, [old_device_id]) return device_id async def get_dehydrated_device( @@ -638,7 +609,7 @@ class DeviceHandler(DeviceWorkerHandler): await self.store.update_device(user_id, device_id, old_device["display_name"]) # can't call self.delete_device because that will clobber the # access token so call the storage layer directly - await self.store.delete_device(user_id, old_device_id) + await self.store.delete_devices(user_id, [old_device_id]) await self.store.delete_e2e_keys_by_device( user_id=user_id, device_id=old_device_id ) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1459a046de..8b0f16f965 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -28,6 +28,7 @@ from synapse.api.errors import ( 
SynapseError, ) from synapse.appservice import ApplicationService +from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.directory import RoomAliasMapping from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id @@ -141,10 +142,15 @@ class DirectoryHandler: 403, "You must be in the room to create an alias for it" ) - if not await self.spam_checker.user_may_create_room_alias( + spam_check = await self.spam_checker.user_may_create_room_alias( user_id, room_alias - ): - raise AuthError(403, "This user is not permitted to create this alias") + ) + if spam_check != self.spam_checker.NOT_SPAM: + raise AuthError( + 403, + "This user is not permitted to create this alias", + spam_check, + ) if not self.config.roomdirectory.is_alias_creation_allowed( user_id, room_id, room_alias_str @@ -430,9 +436,12 @@ class DirectoryHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_publish_room(user_id, room_id): + spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id) + if spam_check != NOT_SPAM: raise AuthError( - 403, "This user is not permitted to publish rooms to the room list" + 403, + "This user is not permitted to publish rooms to the room list", + spam_check, ) if requester.is_guest: diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 6bed464351..ed4149bd58 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -48,14 +48,13 @@ class EventAuthHandler: async def check_auth_rules_from_context( self, - room_version_obj: RoomVersion, event: EventBase, context: EventContext, ) -> None: """Check an event passes the auth rules at its own auth events""" auth_event_ids = event.auth_event_ids() auth_events_by_id = await self._store.get_events(auth_event_ids) - check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values()) + check_auth_rules_for_event(event, auth_events_by_id.values()) def 
compute_auth_events( self, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6a143440d3..34cc5ecd11 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,6 +59,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, @@ -545,6 +546,7 @@ class FederationHandler: if ret.partial_state: # TODO(faster_joins): roll this back if we don't manage to start the # background resync (eg process_remote_join fails) + # https://github.com/matrix-org/synapse/issues/12998 await self.store.store_partial_state_room(room_id, ret.servers_in_room) max_stream_id = await self._federation_event_handler.process_remote_join( @@ -799,9 +801,7 @@ class FederationHandler: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_join_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) return event async def on_invite_request( @@ -821,11 +821,14 @@ class FederationHandler: if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( event.sender, event.state_key, event.room_id - ): + ) + if spam_check != NOT_SPAM: raise SynapseError( - 403, "This user is not permitted to send invites to this server/user" + 403, + "This user is not permitted to send invites to this server/user", + spam_check, ) membership = event.content.get("membership") @@ -972,9 +975,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) raise e @@ -1033,9 +1034,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_knock_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new knock %r because %s", event, e) raise e @@ -1206,9 +1205,9 @@ class FederationHandler: event.internal_metadata.send_on_behalf_of = self.hs.hostname try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) @@ -1258,10 +1257,8 @@ class FederationHandler: ) try: - validate_event_for_room_version(room_version_obj, event) - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + validate_event_for_room_version(event) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e @@ -1506,14 +1503,17 @@ class FederationHandler: # TODO(faster_joins): do we need to lock to avoid races? What happens if other # worker processes kick off a resync in parallel? Perhaps we should just elect # a single worker to do the resync. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): what happens if we leave the room during a resync? if we # really leave, that might mean we have difficulty getting the room state over # federation. + # https://github.com/matrix-org/synapse/issues/12802 # # TODO(faster_joins): we need some way of prioritising which homeservers in # `other_destinations` to try first, otherwise we'll spend ages trying dead # homeservers for large rooms. 
+ # https://github.com/matrix-org/synapse/issues/12999 if initial_destination is None and len(other_destinations) == 0: raise ValueError( @@ -1543,9 +1543,11 @@ class FederationHandler: # all the events are updated, so we can update current state and # clear the lazy-loading flag. logger.info("Updating current state for %s", room_id) + # TODO(faster_joins): support workers + # https://github.com/matrix-org/synapse/issues/12994 assert ( self._storage_controllers.persistence is not None - ), "TODO(faster_joins): support for workers" + ), "worker-mode deployments not currently supported here" await self._storage_controllers.persistence.update_current_state( room_id ) @@ -1559,6 +1561,8 @@ class FederationHandler: ) # TODO(faster_joins) update room stats and user directory? + # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 return # we raced against more events arriving with partial state. Go round @@ -1566,6 +1570,8 @@ class FederationHandler: # TODO(faster_joins): there is still a race here, whereby incoming events which raced # with us will fail to be persisted after the call to `clear_partial_state_room` due to # having partial state. + # https://github.com/matrix-org/synapse/issues/12988 + # continue events = await self.store.get_events_as_list( @@ -1588,6 +1594,7 @@ class FederationHandler: # indefinitely is also not the right thing to do if we can # reach all homeservers and they all claim they don't have # the state we want. 
+ # https://github.com/matrix-org/synapse/issues/13000 logger.error( "Failed to get state for %s at %s from %s because %s, " "giving up!", diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 87a0608359..6c9e6a00b5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -532,6 +532,7 @@ class FederationEventHandler: # # TODO(faster_joins): we probably need to be more intelligent, and # exclude partial-state prev_events from consideration + # https://github.com/matrix-org/synapse/issues/13001 logger.warning( "%s still has partial state: can't de-partial-state it yet", event.event_id, @@ -777,6 +778,7 @@ class FederationEventHandler: state_ids = await self._resolve_state_at_missing_prevs(origin, event) # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does # not return partial state + # https://github.com/matrix-org/synapse/issues/13002 await self._process_received_pdu( origin, event, state_ids=state_ids, backfilled=backfilled @@ -1428,9 +1430,6 @@ class FederationEventHandler: allow_rejected=True, ) - room_version = await self._store.get_room_version_id(room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: with nested_logging_context(suffix=event.event_id): auth = [] @@ -1453,8 +1452,8 @@ class FederationEventHandler: context = EventContext.for_outlier(self._storage_controllers) try: - validate_event_for_room_version(room_version_obj, event) - check_auth_rules_for_event(room_version_obj, event, auth) + validate_event_for_room_version(event) + check_auth_rules_for_event(event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1497,11 +1496,8 @@ class FederationEventHandler: assert not event.internal_metadata.outlier # first of all, check that the event itself is valid. 
- room_version = await self._store.get_room_version_id(event.room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) except AuthError as e: logger.warning("While validating received event %r: %s", event, e) # TODO: use a different rejected reason here? @@ -1519,7 +1515,7 @@ class FederationEventHandler: # ... and check that the event passes auth at those auth events. try: - check_auth_rules_for_event(room_version_obj, event, claimed_auth_events) + check_auth_rules_for_event(event, claimed_auth_events) except AuthError as e: logger.warning( "While checking auth of %r against auth_events: %s", event, e @@ -1567,9 +1563,7 @@ class FederationEventHandler: auth_events_for_auth = calculated_auth_event_map try: - check_auth_rules_for_event( - room_version_obj, event, auth_events_for_auth.values() - ) + check_auth_rules_for_event(event, auth_events_for_auth.values()) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1669,7 +1663,7 @@ class FederationEventHandler: ) try: - check_auth_rules_for_event(room_version_obj, event, current_auth_events) + check_auth_rules_for_event(event, current_auth_events) except AuthError as e: logger.warning( "Soft-failing %r (from %s) because %s", diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f455158a2c..189f52fe5a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -42,7 +42,7 @@ from synapse.api.errors import ( SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.urls import ConsentURIBuilder from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase, relation_from_event @@ -444,7 +444,7 @@ 
_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000 class EventCreationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._event_auth_handler = hs.get_event_auth_handler() self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -605,7 +605,7 @@ class EventCreationHandler: Returns: Tuple of created event, Context """ - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": room_version_id = event_dict["content"]["room_version"] @@ -954,14 +954,12 @@ class EventCreationHandler: "Spam-check module returned invalid error value. Expecting [code, dict], got %s", spam_check_result, ) - spam_check_result = Codes.FORBIDDEN - if isinstance(spam_check_result, Codes): - raise SynapseError( - 403, - "This message has been rejected as probable spam", - spam_check_result, - ) + raise SynapseError( + 403, + "This message has been rejected as probable spam", + Codes.FORBIDDEN, + ) # Backwards compatibility: if the return value is not an error code, it # means the module returned an error message to be included in the @@ -1102,6 +1100,7 @@ class EventCreationHandler: # # TODO(faster_joins): figure out how this works, and make sure that the # old state is complete. 
+ # https://github.com/matrix-org/synapse/issues/13003 metadata = await self.store.get_metadata_for_events(state_event_ids) state_map_for_event: MutableStateMap[str] = {} @@ -1273,23 +1272,6 @@ class EventCreationHandler: ) return prev_event - if event.is_state() and (event.type, event.state_key) == ( - EventTypes.Create, - "", - ): - room_version_id = event.content.get( - "room_version", RoomVersions.V1.identifier - ) - maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) - if not maybe_room_version_obj: - raise UnsupportedRoomVersionError( - "Attempt to create a room with unsupported room version %s" - % (room_version_id,) - ) - room_version_obj = maybe_room_version_obj - else: - room_version_obj = await self.store.get_room_version(event.room_id) - if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here are # invite rejections and rescinded knocks that we have generated ourselves. @@ -1297,9 +1279,9 @@ class EventCreationHandler: assert event.content["membership"] == Membership.LEAVE else: try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as err: logger.warning("Denying new event %r because %s", event, err) diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 9de61d554f..d7a8226900 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, U from urllib.parse import urlencode, urlparse import attr -import pymacaroons from authlib.common.security import generate_token from authlib.jose import JsonWebToken, jwt from authlib.oauth2.auth import ClientAuth @@ -44,7 +43,7 @@ from synapse.logging.context import make_deferred_yieldable from synapse.types import JsonDict, UserID, 
map_username_to_mxid_localpart from synapse.util import Clock, json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall -from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry +from synapse.util.macaroons import MacaroonGenerator, OidcSessionData from synapse.util.templates import _localpart_from_email_filter if TYPE_CHECKING: @@ -105,9 +104,10 @@ class OidcHandler: # we should not have been instantiated if there is no configured provider. assert provider_confs - self._token_generator = OidcSessionTokenGenerator(hs) + self._macaroon_generator = hs.get_macaroon_generator() self._providers: Dict[str, "OidcProvider"] = { - p.idp_id: OidcProvider(hs, self._token_generator, p) for p in provider_confs + p.idp_id: OidcProvider(hs, self._macaroon_generator, p) + for p in provider_confs } async def load_metadata(self) -> None: @@ -216,7 +216,7 @@ class OidcHandler: # Deserialize the session token and verify it. try: - session_data = self._token_generator.verify_oidc_session_token( + session_data = self._macaroon_generator.verify_oidc_session_token( session, state ) except (MacaroonInitException, MacaroonDeserializationException, KeyError) as e: @@ -271,12 +271,12 @@ class OidcProvider: def __init__( self, hs: "HomeServer", - token_generator: "OidcSessionTokenGenerator", + macaroon_generator: MacaroonGenerator, provider: OidcProviderConfig, ): self._store = hs.get_datastores().main - self._token_generator = token_generator + self._macaroon_generaton = macaroon_generator self._config = provider self._callback_url: str = hs.config.oidc.oidc_callback_url @@ -761,7 +761,7 @@ class OidcProvider: if not client_redirect_url: client_redirect_url = b"" - cookie = self._token_generator.generate_oidc_session_token( + cookie = self._macaroon_generaton.generate_oidc_session_token( state=state, session_data=OidcSessionData( idp_id=self.idp_id, @@ -1112,121 +1112,6 @@ class JwtClientSecret: return self._cached_secret -class 
OidcSessionTokenGenerator: - """Methods for generating and checking OIDC Session cookies.""" - - def __init__(self, hs: "HomeServer"): - self._clock = hs.get_clock() - self._server_name = hs.hostname - self._macaroon_secret_key = hs.config.key.macaroon_secret_key - - def generate_oidc_session_token( - self, - state: str, - session_data: "OidcSessionData", - duration_in_ms: int = (60 * 60 * 1000), - ) -> str: - """Generates a signed token storing data about an OIDC session. - - When Synapse initiates an authorization flow, it creates a random state - and a random nonce. Those parameters are given to the provider and - should be verified when the client comes back from the provider. - It is also used to store the client_redirect_url, which is used to - complete the SSO login flow. - - Args: - state: The ``state`` parameter passed to the OIDC provider. - session_data: data to include in the session token. - duration_in_ms: An optional duration for the token in milliseconds. - Defaults to an hour. - - Returns: - A signed macaroon token with the session information. 
- """ - macaroon = pymacaroons.Macaroon( - location=self._server_name, - identifier="key", - key=self._macaroon_secret_key, - ) - macaroon.add_first_party_caveat("gen = 1") - macaroon.add_first_party_caveat("type = session") - macaroon.add_first_party_caveat("state = %s" % (state,)) - macaroon.add_first_party_caveat("idp_id = %s" % (session_data.idp_id,)) - macaroon.add_first_party_caveat("nonce = %s" % (session_data.nonce,)) - macaroon.add_first_party_caveat( - "client_redirect_url = %s" % (session_data.client_redirect_url,) - ) - macaroon.add_first_party_caveat( - "ui_auth_session_id = %s" % (session_data.ui_auth_session_id,) - ) - now = self._clock.time_msec() - expiry = now + duration_in_ms - macaroon.add_first_party_caveat("time < %d" % (expiry,)) - - return macaroon.serialize() - - def verify_oidc_session_token( - self, session: bytes, state: str - ) -> "OidcSessionData": - """Verifies and extract an OIDC session token. - - This verifies that a given session token was issued by this homeserver - and extract the nonce and client_redirect_url caveats. - - Args: - session: The session token to verify - state: The state the OIDC provider gave back - - Returns: - The data extracted from the session cookie - - Raises: - KeyError if an expected caveat is missing from the macaroon. - """ - macaroon = pymacaroons.Macaroon.deserialize(session) - - v = pymacaroons.Verifier() - v.satisfy_exact("gen = 1") - v.satisfy_exact("type = session") - v.satisfy_exact("state = %s" % (state,)) - v.satisfy_general(lambda c: c.startswith("nonce = ")) - v.satisfy_general(lambda c: c.startswith("idp_id = ")) - v.satisfy_general(lambda c: c.startswith("client_redirect_url = ")) - v.satisfy_general(lambda c: c.startswith("ui_auth_session_id = ")) - satisfy_expiry(v, self._clock.time_msec) - - v.verify(macaroon, self._macaroon_secret_key) - - # Extract the session data from the token. 
- nonce = get_value_from_macaroon(macaroon, "nonce") - idp_id = get_value_from_macaroon(macaroon, "idp_id") - client_redirect_url = get_value_from_macaroon(macaroon, "client_redirect_url") - ui_auth_session_id = get_value_from_macaroon(macaroon, "ui_auth_session_id") - return OidcSessionData( - nonce=nonce, - idp_id=idp_id, - client_redirect_url=client_redirect_url, - ui_auth_session_id=ui_auth_session_id, - ) - - -@attr.s(frozen=True, slots=True, auto_attribs=True) -class OidcSessionData: - """The attributes which are stored in a OIDC session cookie""" - - # the Identity Provider being used - idp_id: str - - # The `nonce` parameter passed to the OIDC provider. - nonce: str - - # The URL the client gave when it initiated the flow. ("" if this is a UI Auth) - client_redirect_url: str - - # The session ID of the ongoing UI Auth ("" if this is a login) - ui_auth_session_id: str - - class UserAttributeDict(TypedDict): localpart: Optional[str] confirm_localpart: bool diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 338204287f..c77d181722 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -91,6 +91,7 @@ class RegistrationHandler: self.clock = hs.get_clock() self.hs = hs self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() @@ -276,7 +277,7 @@ class RegistrationHandler: # do not check_auth_blocking if the call is coming through the Admin API if not by_admin: - await self.auth.check_auth_blocking(threepid=threepid) + await self.auth_blocking.check_auth_blocking(threepid=threepid) if localpart is not None: await self.check_username(localpart, guest_access_token=guest_access_token) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 520663f172..75c0be8c36 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ 
-62,6 +62,7 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.handlers.relations import BundledAggregations +from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.state import StateFilter from synapse.streams import EventSource @@ -109,6 +110,7 @@ class RoomCreationHandler: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.hs = hs self.spam_checker = hs.get_spam_checker() @@ -226,10 +228,9 @@ class RoomCreationHandler: }, }, ) - old_room_version = await self.store.get_room_version(old_room_id) - validate_event_for_room_version(old_room_version, tombstone_event) + validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( - old_room_version, tombstone_event, tombstone_context + tombstone_event, tombstone_context ) # Upgrade the room @@ -437,10 +438,9 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_create_room(user_id): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError(403, "You are not permitted to create rooms", spam_check) creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -707,7 +707,7 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if ( self._server_notices_mxid is not None @@ -727,12 +727,12 @@ class RoomCreationHandler: invite_3pid_list = 
config.get("invite_3pid", []) invite_list = config.get("invite", []) - if not is_requester_admin and not ( - await self.spam_checker.user_may_create_room(user_id) - ): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + if not is_requester_admin: + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError( + 403, "You are not permitted to create rooms", spam_check + ) if ratelimit: await self.request_ratelimiter.ratelimit(requester) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d1199a0644..e89b7441ad 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -38,6 +38,7 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.module_api import NOT_SPAM from synapse.storage.state import StateFilter from synapse.types import ( JsonDict, @@ -683,7 +684,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if target_id == self._server_notices_mxid: raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") - block_invite = False + block_invite_code = None if ( self._server_notices_mxid is not None @@ -701,16 +702,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): "Blocking invite: user is not admin and non-admin " "invites disabled" ) - block_invite = True + block_invite_code = Codes.FORBIDDEN - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( requester.user.to_string(), target_id, room_id - ): + ) + if spam_check != NOT_SPAM: logger.info("Blocking invite due to spam checker") - block_invite = True + block_invite_code = spam_check - if block_invite: - raise SynapseError(403, "Invites have been disabled on this server") + if block_invite_code is not 
None: + raise SynapseError( + 403, "Invites have been disabled on this server", block_invite_code + ) # An empty prev_events list is allowed as long as the auth_event_ids are present if prev_event_ids is not None: @@ -818,11 +822,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # We assume that if the spam checker allowed the user to create # a room then they're allowed to join it. and not new_room - and not await self.spam_checker.user_may_join_room( + ): + spam_check = await self.spam_checker.user_may_join_room( target.to_string(), room_id, is_invited=inviter is not None ) - ): - raise SynapseError(403, "Not allowed to join this room") + if spam_check != NOT_SPAM: + raise SynapseError(403, "Not allowed to join this room", spam_check) # Check if a remote join should be performed. remote_join, remote_room_hosts = await self._should_perform_remote_join( @@ -1369,13 +1374,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) else: # Check if the spamchecker(s) allow this invite to go through. 
- if not await self.spam_checker.user_may_send_3pid_invite( + spam_check = await self.spam_checker.user_may_send_3pid_invite( inviter_userid=requester.user.to_string(), medium=medium, address=address, room_id=room_id, - ): - raise SynapseError(403, "Cannot send threepid invite") + ) + if spam_check != NOT_SPAM: + raise SynapseError(403, "Cannot send threepid invite", spam_check) stream_id = await self._make_and_store_3pid_invite( requester, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b4ead79f97..af19c513be 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -237,7 +237,7 @@ class SyncHandler: self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -280,7 +280,7 @@ class SyncHandler: # not been exceeded (if not part of the group by this point, almost certain # auth_blocking will occur) user_id = sync_config.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) res = await self.response_cache.wrap( sync_config.request_key, diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 776ed43f03..c63d068f74 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -731,8 +731,11 @@ class MatrixFederationHttpClient: Returns: A list of headers to be added as "Authorization:" headers """ - if destination is None and destination_is is None: - raise ValueError("destination and destination_is cannot both be None!") + if not destination and not destination_is: + raise ValueError( + "At least one of the arguments destination and destination_is " + "must be a nonempty bytestring." 
+ ) request: JsonDict = { "method": method.decode("ascii"), diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index fffd83546c..496fce2ecc 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -35,7 +35,6 @@ from typing import ( ) import attr -from matrix_common.versionstring import get_distribution_version_string from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric from prometheus_client.core import ( REGISTRY, @@ -54,6 +53,7 @@ from synapse.metrics._exposition import ( ) from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._types import Collector +from synapse.util import SYNAPSE_VERSION logger = logging.getLogger(__name__) @@ -419,7 +419,7 @@ build_info = Gauge( ) build_info.labels( " ".join([platform.python_implementation(), platform.python_version()]), - get_distribution_version_string("matrix-synapse"), + SYNAPSE_VERSION, " ".join([platform.system(), platform.release()]), ).set(1) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a8ad575fcd..6191c2dc96 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -115,6 +115,7 @@ from synapse.types import ( JsonDict, JsonMapping, Requester, + RoomAlias, StateMap, UserID, UserInfo, @@ -163,6 +164,7 @@ __all__ = [ "EventBase", "StateMap", "ProfileInfo", + "RoomAlias", "UserProfile", ] @@ -799,7 +801,7 @@ class ModuleApi: if device_id: # delete the device, which will also delete its access tokens yield defer.ensureDeferred( - self._hs.get_device_handler().delete_device(user_id, device_id) + self._hs.get_device_handler().delete_devices(user_id, [device_id]) ) else: # no associated device. Just delete the access token. 
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 015c19b2d9..c2575ba3d9 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -860,13 +860,14 @@ class Mailer: A link to unsubscribe from email notifications. """ params = { - "access_token": self.macaroon_gen.generate_delete_pusher_token(user_id), + "access_token": self.macaroon_gen.generate_delete_pusher_token( + user_id, app_id, email_address + ), "app_id": app_id, "pushkey": email_address, } - # XXX: make r0 once API is stable - return "%s_matrix/client/unstable/pushers/remove?%s" % ( + return "%s_synapse/client/unsubscribe?%s" % ( self.hs.config.server.public_baseurl, urllib.parse.urlencode(params), ) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 1aa08f8d95..fa3266720b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -20,8 +20,6 @@ import platform from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple -from matrix_common.versionstring import get_distribution_version_string - from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -88,6 +86,7 @@ from synapse.rest.admin.users import ( WhoisRestServlet, ) from synapse.types import JsonDict, RoomStreamToken +from synapse.util import SYNAPSE_VERSION if TYPE_CHECKING: from synapse.server import HomeServer @@ -100,7 +99,7 @@ class VersionServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.res = { - "server_version": get_distribution_version_string("matrix-synapse"), + "server_version": SYNAPSE_VERSION, "python_version": platform.python_version(), } diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index cef46ba0dd..d934880102 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -80,7 +80,7 @@ class DeviceRestServlet(RestServlet): if u 
is None: raise NotFoundError("Unknown user") - await self.device_handler.delete_device(target_user.to_string(), device_id) + await self.device_handler.delete_devices(target_user.to_string(), [device_id]) return HTTPStatus.OK, {} async def on_PUT( diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index ad6fd6492b..6fab102437 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -147,7 +147,9 @@ class DeviceRestServlet(RestServlet): can_skip_ui_auth=True, ) - await self.device_handler.delete_device(requester.user.to_string(), device_id) + await self.device_handler.delete_devices( + requester.user.to_string(), [device_id] + ) return 200, {} async def on_PUT( diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 193a6951b9..23dfa4518f 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -45,8 +45,8 @@ class LogoutRestServlet(RestServlet): access_token = self.auth.get_access_token_from_request(request) await self._auth_handler.delete_access_token(access_token) else: - await self._device_handler.delete_device( - requester.user.to_string(), requester.device_id + await self._device_handler.delete_devices( + requester.user.to_string(), [requester.device_id] ) return 200, {} diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py index c684636c0a..c16d707909 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py @@ -13,7 +13,7 @@ # limitations under the License. 
""" This module contains REST servlets to do with profile: /profile/<paths> """ - +from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse.api.errors import Codes, SynapseError @@ -45,8 +45,12 @@ class ProfileDisplaynameRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) displayname = await self.profile_handler.get_displayname(user) @@ -98,8 +102,12 @@ class ProfileAvatarURLRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) avatar_url = await self.profile_handler.get_avatar_url(user) @@ -150,8 +158,12 @@ class ProfileRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) displayname = await self.profile_handler.get_displayname(user) diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py index d6487c31dd..9a1f10f4be 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py @@ -1,4 +1,5 @@ # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2022 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,17 +16,17 @@ import logging from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import Codes, StoreError, SynapseError -from synapse.http.server import HttpServer, respond_with_html_bytes +from synapse.api.errors import Codes, SynapseError +from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, - parse_string, ) from synapse.http.site import SynapseRequest from synapse.push import PusherConfigException from synapse.rest.client._base import client_patterns +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.types import JsonDict if TYPE_CHECKING: @@ -132,48 +133,21 @@ class PushersSetRestServlet(RestServlet): return 200, {} -class PushersRemoveRestServlet(RestServlet): +class LegacyPushersRemoveRestServlet(UnsubscribeResource, RestServlet): """ - To allow pusher to be delete by clicking a link (ie. GET request) + A servlet to handle legacy "email unsubscribe" links, forwarding requests to the ``UnsubscribeResource`` + + This should be kept for some time, so unsubscribe links in past emails stay valid. 
""" - PATTERNS = client_patterns("/pushers/remove$", v1=True) - SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>" - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.notifier = hs.get_notifier() - self.auth = hs.get_auth() - self.pusher_pool = self.hs.get_pusherpool() + PATTERNS = client_patterns("/pushers/remove$", releases=[], v1=False, unstable=True) async def on_GET(self, request: SynapseRequest) -> None: - requester = await self.auth.get_user_by_req(request, rights="delete_pusher") - user = requester.user - - app_id = parse_string(request, "app_id", required=True) - pushkey = parse_string(request, "pushkey", required=True) - - try: - await self.pusher_pool.remove_pusher( - app_id=app_id, pushkey=pushkey, user_id=user.to_string() - ) - except StoreError as se: - if se.code != 404: - # This is fine: they're already unsubscribed - raise - - self.notifier.on_new_replication_data() - - respond_with_html_bytes( - request, - 200, - PushersRemoveRestServlet.SUCCESS_HTML, - ) - return None + # Forward the request to the UnsubscribeResource + await self._async_render(request) def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: PushersRestServlet(hs).register(http_server) PushersSetRestServlet(hs).register(http_server) - PushersRemoveRestServlet(hs).register(http_server) + LegacyPushersRemoveRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 3cae6d2b55..ce97080013 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -43,6 +43,7 @@ class RelationPaginationServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main self._relations_handler = hs.get_relations_handler() + self._msc3715_enabled = hs.config.experimental.msc3715_enabled async def on_GET( self, @@ -55,9 +56,15 @@ class RelationPaginationServlet(RestServlet): requester = await 
self.auth.get_user_by_req(request, allow_guest=True) limit = parse_integer(request, "limit", default=5) - direction = parse_string( - request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] - ) + if self._msc3715_enabled: + direction = parse_string( + request, + "org.matrix.msc3715.dir", + default="b", + allowed_values=["f", "b"], + ) + else: + direction = "b" from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 604f18bf52..9137417342 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -36,6 +36,7 @@ from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender +import synapse from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util import Clock @@ -145,15 +146,15 @@ class MediaStorage: f.flush() f.close() - spam = await self.spam_checker.check_media_file_for_spam( + spam_check = await self.spam_checker.check_media_file_for_spam( ReadableFileWrapper(self.clock, fname), file_info ) - if spam: + if spam_check != synapse.module_api.NOT_SPAM: logger.info("Blocking media due to spam checker") # Note that we'll delete the stored media, due to the # try/except below. The media also won't be stored in # the DB. 
- raise SpamMediaException() + raise SpamMediaException(errcode=spam_check) for provider in self.storage_providers: await provider.store_file(path, file_info) diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index 6ad558f5d1..e55924f597 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -20,6 +20,7 @@ from synapse.rest.synapse.client.new_user_consent import NewUserConsentResource from synapse.rest.synapse.client.pick_idp import PickIdpResource from synapse.rest.synapse.client.pick_username import pick_username_resource from synapse.rest.synapse.client.sso_register import SsoRegisterResource +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource if TYPE_CHECKING: from synapse.server import HomeServer @@ -41,6 +42,8 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc "/_synapse/client/pick_username": pick_username_resource(hs), "/_synapse/client/new_user_consent": NewUserConsentResource(hs), "/_synapse/client/sso_register": SsoRegisterResource(hs), + # Unsubscribe to notification emails link + "/_synapse/client/unsubscribe": UnsubscribeResource(hs), } # provider-specific SSO bits. Only load these if they are enabled, since they diff --git a/synapse/rest/synapse/client/unsubscribe.py b/synapse/rest/synapse/client/unsubscribe.py new file mode 100644 index 0000000000..60321018f9 --- /dev/null +++ b/synapse/rest/synapse/client/unsubscribe.py @@ -0,0 +1,64 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from synapse.api.errors import StoreError +from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes +from synapse.http.servlet import parse_string +from synapse.http.site import SynapseRequest + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class UnsubscribeResource(DirectServeHtmlResource): + """ + To allow pusher to be delete by clicking a link (ie. GET request) + """ + + SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.notifier = hs.get_notifier() + self.auth = hs.get_auth() + self.pusher_pool = hs.get_pusherpool() + self.macaroon_generator = hs.get_macaroon_generator() + + async def _async_render_GET(self, request: SynapseRequest) -> None: + token = parse_string(request, "access_token", required=True) + app_id = parse_string(request, "app_id", required=True) + pushkey = parse_string(request, "pushkey", required=True) + + user_id = self.macaroon_generator.verify_delete_pusher_token( + token, app_id, pushkey + ) + + try: + await self.pusher_pool.remove_pusher( + app_id=app_id, pushkey=pushkey, user_id=user_id + ) + except StoreError as se: + if se.code != 404: + # This is fine: they're already unsubscribed + raise + + self.notifier.on_new_replication_data() + + respond_with_html_bytes( + request, + 200, + UnsubscribeResource.SUCCESS_HTML, + ) diff --git a/synapse/server.py b/synapse/server.py index a66ec228db..181984a1a4 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -29,6 
+29,7 @@ from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter from synapse.appservice.api import ApplicationServiceApi @@ -55,7 +56,7 @@ from synapse.handlers.account_data import AccountDataHandler from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler -from synapse.handlers.auth import AuthHandler, MacaroonGenerator, PasswordAuthProvider +from synapse.handlers.auth import AuthHandler, PasswordAuthProvider from synapse.handlers.cas import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler @@ -129,6 +130,7 @@ from synapse.streams.events import EventSources from synapse.types import DomainSpecificString, ISynapseReactor from synapse.util import Clock from synapse.util.distributor import Distributor +from synapse.util.macaroons import MacaroonGenerator from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import random_string @@ -380,6 +382,10 @@ class HomeServer(metaclass=abc.ABCMeta): return Auth(self) @cache_in_self + def get_auth_blocking(self) -> AuthBlocking: + return AuthBlocking(self) + + @cache_in_self def get_http_client_context_factory(self) -> IPolicyForHTTPS: if self.config.tls.use_insecure_ssl_client_just_for_testing_do_not_use: return InsecureInterceptableContextFactory() @@ -487,7 +493,9 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_macaroon_generator(self) -> MacaroonGenerator: - return MacaroonGenerator(self) + return MacaroonGenerator( + self.get_clock(), self.hostname, self.config.key.macaroon_secret_key + ) @cache_in_self def 
get_device_handler(self): diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 6863020778..3134cd2d3d 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -37,7 +37,7 @@ class ResourceLimitsServerNotices: self._server_notices_manager = hs.get_server_notices_manager() self._store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() - self._auth = hs.get_auth() + self._auth_blocking = hs.get_auth_blocking() self._config = hs.config self._resouce_limited = False self._account_data_handler = hs.get_account_data_handler() @@ -91,7 +91,7 @@ class ResourceLimitsServerNotices: # Normally should always pass in user_id to check_auth_blocking # if you have it, but in this case are checking what would happen # to other users if they were to arrive. - await self._auth.check_auth_blocking() + await self._auth_blocking.check_auth_blocking() except ResourceLimitError as e: limit_msg = e.msg limit_type = e.limit_type diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 499a328201..8bbb4ce41c 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,7 +30,7 @@ from typing import ( from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap @@ -331,7 +331,6 @@ def _resolve_auth_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, auth_events.values(), ) @@ -349,7 +348,6 @@ def _resolve_normal_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, 
auth_events.values(), ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index c618df2fde..6a16f38a15 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -17,12 +17,14 @@ import itertools import logging from typing import ( Any, + Awaitable, Callable, Collection, Dict, Generator, Iterable, List, + Mapping, Optional, Sequence, Set, @@ -30,33 +32,58 @@ from typing import ( overload, ) -from typing_extensions import Literal +from typing_extensions import Literal, Protocol -import synapse.state from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap -from synapse.util import Clock logger = logging.getLogger(__name__) +class Clock(Protocol): + # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests. + # We only ever sleep(0) though, so that other async functions can make forward + # progress without waiting for stateres to complete. + def sleep(self, duration_ms: float) -> Awaitable[None]: + ... + + +class StateResolutionStore(Protocol): + # This is usually synapse.state.StateResolutionStore, but it's replaced with a + # TestStateResolutionStore in tests. + def get_events( + self, event_ids: Collection[str], allow_rejected: bool = False + ) -> Awaitable[Dict[str, EventBase]]: + ... + + def get_auth_chain_difference( + self, room_id: str, state_sets: List[Set[str]] + ) -> Awaitable[Set[str]]: + ... + + # We want to await to the reactor occasionally during state res when dealing # with large data sets, so that we don't exhaust the reactor. This is done by # awaiting to reactor during loops every N iterations. 
_AWAIT_AFTER_ITERATIONS = 100 +__all__ = [ + "resolve_events_with_store", +] + + async def resolve_events_with_store( clock: Clock, room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm @@ -194,7 +221,7 @@ async def _get_power_level_for_sender( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Return the power level of the sender of the given event according to their auth events. @@ -243,9 +270,9 @@ async def _get_power_level_for_sender( async def _get_auth_chain_difference( room_id: str, - state_sets: Sequence[StateMap[str]], + state_sets: Sequence[Mapping[Any, str]], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> Set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some but not all of the auth chains. 
@@ -406,7 +433,7 @@ async def _add_event_and_auth_chain_to_graph( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> None: """Helper function for _reverse_topological_power_sort that add the event @@ -440,7 +467,7 @@ async def _reverse_topological_power_sort( room_id: str, event_ids: Iterable[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> List[str]: """Returns a list of the event_ids sorted by reverse topological ordering, @@ -501,7 +528,7 @@ async def _iterative_auth_checks( event_ids: List[str], base_state: StateMap[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> MutableStateMap[str]: """Sequentially apply auth checks to each event in given list, updating the state as it goes along. 
@@ -547,7 +574,6 @@ async def _iterative_auth_checks( try: event_auth.check_auth_rules_for_event( - room_version, event, auth_events.values(), ) @@ -570,7 +596,7 @@ async def _mainline_sort( event_ids: List[str], resolved_power_event_id: Optional[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> List[str]: """Returns a sorted list of event_ids sorted by mainline ordering based on the given event resolved_power_event_id @@ -639,7 +665,7 @@ async def _get_mainline_depth_for_event( event: EventBase, mainline_map: Dict[str, int], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Get the mainline depths for the given event based on the mainline map @@ -683,7 +709,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[False] = False, ) -> EventBase: ... @@ -694,7 +720,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[True], ) -> Optional[EventBase]: ... 
@@ -704,7 +730,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: bool = False, ) -> Optional[EventBase]: """Helper function to look up event in event_map, falling back to looking diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index b1e5208c76..555b4e77d2 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -507,25 +507,6 @@ class BackgroundUpdater: update_handler ) - def register_noop_background_update(self, update_name: str) -> None: - """Register a noop handler for a background update. - - This is useful when we previously did a background update, but no - longer wish to do the update. In this case the background update should - be removed from the schema delta files, but there may still be some - users who have the background update queued, so this method should - also be called to clear the update. - - Args: - update_name: Name of update - """ - - async def noop_update(progress: JsonDict, batch_size: int) -> int: - await self._end_background_update(update_name) - return 1 - - self.register_background_update_handler(update_name, noop_update) - def register_background_index_update( self, update_name: str, diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 4caaa81808..4bcb99d06e 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -388,10 +388,13 @@ class EventsPersistenceStorageController: # TODO(faster_joins): get a real stream ordering, to make this work correctly # across workers. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): this can race against event persistence, in which case we # will end up with incorrect state. 
Perhaps we should make this a job we - # farm out to the event persister, somehow. + # farm out to the event persister thread, somehow. + # https://github.com/matrix-org/synapse/issues/13007 + # stream_id = self.main_store.get_room_max_stream_ordering() await self.persist_events_store.update_current_state(room_id, delta, stream_id) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 3b4cdb67eb..d3a44bc876 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -452,6 +452,9 @@ class StateStorageController: up to date. """ # FIXME(faster_joins): what do we do here? + # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 + # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( prev_stream_id, max_stream_id diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 11d9d16c19..9121badb3a 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -45,7 +45,6 @@ from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore from .filtering import FilteringStore -from .group_server import GroupServerStore from .keys import KeyStore from .lock import LockStore from .media_repository import MediaRepositoryStore @@ -117,7 +116,6 @@ class DataStore( DeviceStore, DeviceInboxStore, UserDirectoryStore, - GroupServerStore, UserErasureStore, MonthlyActiveUsersWorkerStore, StatsStore, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 599b418383..422e0e65ca 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -834,8 +834,6 @@ class 
DeviceInboxWorkerStore(SQLBaseStore): class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - REMOVE_DELETED_DEVICES = "remove_deleted_devices_from_device_inbox" - REMOVE_HIDDEN_DEVICES = "remove_hidden_devices_from_device_inbox" REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox" def __init__( @@ -857,15 +855,6 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox ) - # Used to be a background update that deletes all device_inboxes for deleted - # devices. - self.db_pool.updates.register_noop_background_update( - self.REMOVE_DELETED_DEVICES - ) - # Used to be a background update that deletes all device_inboxes for hidden - # devices. - self.db_pool.updates.register_noop_background_update(self.REMOVE_HIDDEN_DEVICES) - self.db_pool.updates.register_background_update_handler( self.REMOVE_DEAD_DEVICES_FROM_INBOX, self._remove_dead_devices_from_device_inbox, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d900064c07..2414a7dc38 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1240,15 +1240,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): self._remove_duplicate_outbound_pokes, ) - # a pair of background updates that were added during the 1.14 release cycle, - # but replaced with 58/06dlols_unique_idx.py - self.db_pool.updates.register_noop_background_update( - "device_lists_outbound_last_success_unique_idx", - ) - self.db_pool.updates.register_noop_background_update( - "drop_device_lists_outbound_last_success_non_unique_idx", - ) - async def _drop_device_list_streams_non_unique_indexes( self, progress: JsonDict, batch_size: int ) -> int: @@ -1433,16 +1424,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): ) raise StoreError(500, "Problem storing device.") - async def delete_device(self, 
user_id: str, device_id: str) -> None: - """Delete a device and its device_inbox. - - Args: - user_id: The ID of the user which owns the device - device_id: The ID of the device to delete - """ - - await self.delete_devices(user_id, [device_id]) - async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: """Deletes several devices. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 17e35cf63e..a8773374be 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -46,7 +46,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventCacheEntry from synapse.storage.databases.main.search import SearchEntry -from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator from synapse.types import JsonDict, StateMap, get_domain_from_id diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index d5f0059665..bea34a4c4a 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -177,11 +177,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): self._purged_chain_cover_index, ) - # The event_thread_relation background update was replaced with the - # event_arbitrary_relations one, which handles any relation to avoid - # needed to potentially crawl the entire events table in the future. 
- self.db_pool.updates.register_noop_background_update("event_thread_relation") - self.db_pool.updates.register_background_update_handler( "event_arbitrary_relations", self._event_arbitrary_relations, diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py deleted file mode 100644 index c15a7136b6..0000000000 --- a/synapse/storage/databases/main/group_server.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class GroupServerStore(SQLBaseStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - # Register a legacy groups background update as a no-op. 
- database.updates.register_noop_background_update("local_group_updates_index") - super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index d028be16de..9b172a64d8 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -37,9 +37,6 @@ from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer -BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( - "media_repository_drop_index_wo_method" -) BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( "media_repository_drop_index_wo_method_2" ) @@ -111,13 +108,6 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): unique=True, ) - # the original impl of _drop_media_index_without_method was broken (see - # https://github.com/matrix-org/synapse/issues/8649), so we replace the original - # impl with a no-op and run the fixed migration as - # media_repository_drop_index_wo_method_2. 
- self.db_pool.updates.register_noop_background_update( - BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD - ) self.db_pool.updates.register_background_update_handler( BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2, self._drop_media_index_without_method, diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 21e954ccc1..b6106affa6 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -36,6 +36,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.storage.engines._base import IsolationLevel from synapse.storage.util.id_generators import ( AbstractStreamIdTracker, MultiWriterIdGenerator, @@ -764,6 +765,10 @@ class ReceiptsWorkerStore(SQLBaseStore): linearized_event_id, data, stream_id=stream_id, + # Read committed is actually beneficial here because we check for a receipt with + # greater stream order, and checking the very latest data at select time is better + # than the data at transaction start time. + isolation_level=IsolationLevel.READ_COMMITTED, ) # If the receipt was older than the currently persisted one, nothing to do. diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 4991360b70..cb63cd9b7d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1805,21 +1805,10 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): columns=["creation_ts"], ) - # we no longer use refresh tokens, but it's possible that some people - # might have a background update queued to build this index. Just - # clear the background update. 
- self.db_pool.updates.register_noop_background_update( - "refresh_tokens_device_index" - ) - self.db_pool.updates.register_background_update_handler( "users_set_deactivated_flag", self._background_update_set_deactivated_flag ) - self.db_pool.updates.register_noop_background_update( - "user_threepids_grandfather" - ) - self.db_pool.updates.register_background_index_update( "user_external_ids_user_id_idx", index_name="user_external_ids_user_id_idx", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 68d4fc2e64..5760d3428e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1112,6 +1112,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): # this can race with incoming events, so we watch out for FK errors. # TODO(faster_joins): this still doesn't completely fix the race, since the persist process # is not atomic. I fear we need an application-level lock. + # https://github.com/matrix-org/synapse/issues/12988 try: await self.db_pool.runInteraction( "clear_partial_state_room", self._clear_partial_state_room_txn, room_id @@ -1119,6 +1120,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): return True except self.db_pool.engine.module.DatabaseError as e: # TODO(faster_joins): how do we distinguish between FK errors and other errors? 
+ # https://github.com/matrix-org/synapse/issues/12988 logger.warning( "Exception while clearing lazy partial-state-room %s, retrying: %s", room_id, diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 78e0773b2a..f6e24b68d2 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -113,7 +113,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_UPDATE_NAME = "event_search" EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" - EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" EVENT_SEARCH_DELETE_NON_STRINGS = "event_search_sqlite_delete_non_strings" @@ -132,15 +131,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order ) - # we used to have a background update to turn the GIN index into a - # GIST one; we no longer do that (obviously) because we actually want - # a GIN index. However, it's possible that some people might still have - # the background update queued, so we register a handler to clear the - # background update. 
- self.db_pool.updates.register_noop_background_update( - self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME - ) - self.db_pool.updates.register_background_update_handler( self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search ) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index bdd00273cd..9674c4a757 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -127,13 +127,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): NotFoundError: if the room is unknown """ - # First we try looking up room version from the database, but for old - # rooms we might not have added the room version to it yet so we fall - # back to previous behaviour and look in current state events. - # # We really should have an entry in the rooms table for every room we - # care about, but let's be a bit paranoid (at least while the background - # update is happening) to avoid breaking existing rooms. + # care about, but let's be a bit paranoid. 
room_version = self.db_pool.simple_select_one_onecol_txn( txn, table="rooms", @@ -440,6 +435,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # TODO(faster_joins): need to do something about workers here + # https://github.com/matrix-org/synapse/issues/12994 txn.call_after(self.is_partial_state_event.invalidate, (event.event_id,)) txn.call_after( self._get_state_group_for_event.prefill, diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index b95dbef678..538451b05f 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -120,11 +120,6 @@ class StatsStore(StateDeltasStore): self.db_pool.updates.register_background_update_handler( "populate_stats_process_users", self._populate_stats_process_users ) - # we no longer need to perform clean-up, but we will give ourselves - # the potential to reintroduce it in the future – so documentation - # will still encourage the use of this no-op handler. - self.db_pool.updates.register_noop_background_update("populate_stats_cleanup") - self.db_pool.updates.register_noop_background_update("populate_stats_prepare") async def _populate_stats_process_users( self, progress: JsonDict, batch_size: int diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index f51b3d228e..a182e8a098 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -11,11 +11,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Mapping +from typing import Any, Mapping, NoReturn from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup -from .postgres import PostgresEngine -from .sqlite import Sqlite3Engine + +# The classes `PostgresEngine` and `Sqlite3Engine` must always be importable, because +# we use `isinstance(engine, PostgresEngine)` to write different queries for postgres +# and sqlite. But the database driver modules are both optional: they may not be +# installed. To account for this, create dummy classes on import failure so we can +# still run `isinstance()` checks. +try: + from .postgres import PostgresEngine +except ImportError: + + class PostgresEngine(BaseDatabaseEngine): # type: ignore[no-redef] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + raise RuntimeError( + f"Cannot create {cls.__name__} -- psycopg2 module is not installed" + ) + + +try: + from .sqlite import Sqlite3Engine +except ImportError: + + class Sqlite3Engine(BaseDatabaseEngine): # type: ignore[no-redef] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + raise RuntimeError( + f"Cannot create {cls.__name__} -- sqlite3 module is not installed" + ) def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine: @@ -30,4 +54,10 @@ def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine: raise RuntimeError("Unsupported database engine '%s'" % (name,)) -__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"] +__all__ = [ + "create_engine", + "BaseDatabaseEngine", + "PostgresEngine", + "Sqlite3Engine", + "IncorrectDatabaseSetup", +] diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 391f8ed24a..517f9d5f98 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,6 +15,8 @@ import logging from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast +import 
psycopg2.extensions + from synapse.storage.engines._base import ( BaseDatabaseEngine, IncorrectDatabaseSetup, @@ -23,18 +25,14 @@ from synapse.storage.engines._base import ( from synapse.storage.types import Cursor if TYPE_CHECKING: - import psycopg2 # noqa: F401 - from synapse.storage.database import LoggingDatabaseConnection logger = logging.getLogger(__name__) -class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): +class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]): def __init__(self, database_config: Mapping[str, Any]): - import psycopg2.extensions - super().__init__(psycopg2, database_config) psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) @@ -69,7 +67,9 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return collation, ctype def check_database( - self, db_conn: "psycopg2.connection", allow_outdated_version: bool = False + self, + db_conn: psycopg2.extensions.connection, + allow_outdated_version: bool = False, ) -> None: # Get the version of PostgreSQL that we're using. 
As per the psycopg2 # docs: The number is formed by converting the major, minor, and @@ -176,8 +176,6 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return True def is_deadlock(self, error: Exception) -> bool: - import psycopg2.extensions - if isinstance(error, psycopg2.DatabaseError): # https://www.postgresql.org/docs/current/static/errcodes-appendix.html # "40001" serialization_failure @@ -185,7 +183,7 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return error.pgcode in ["40001", "40P01"] return False - def is_connection_closed(self, conn: "psycopg2.connection") -> bool: + def is_connection_closed(self, conn: psycopg2.extensions.connection) -> bool: return bool(conn.closed) def lock_table(self, txn: Cursor, table: str) -> None: @@ -205,18 +203,16 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) - def in_transaction(self, conn: "psycopg2.connection") -> bool: - import psycopg2.extensions - + def in_transaction(self, conn: psycopg2.extensions.connection) -> bool: return conn.status != psycopg2.extensions.STATUS_READY def attempt_to_set_autocommit( - self, conn: "psycopg2.connection", autocommit: bool + self, conn: psycopg2.extensions.connection, autocommit: bool ) -> None: return conn.set_session(autocommit=autocommit) def attempt_to_set_isolation_level( - self, conn: "psycopg2.connection", isolation_level: Optional[int] + self, conn: psycopg2.extensions.connection, isolation_level: Optional[int] ) -> None: if isolation_level is None: isolation_level = self.default_isolation_level diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c33df42084..09a2b58f4c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -23,8 +23,7 @@ from typing_extensions import Counter as CounterType from synapse.config.homeserver import HomeServerConfig from 
synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import BaseDatabaseEngine -from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION from synapse.storage.types import Cursor diff --git a/synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql b/synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql new file mode 100644 index 0000000000..fa96ac50c2 --- /dev/null +++ b/synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql @@ -0,0 +1,61 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Clean-up background updates which should no longer be run. Previously these +-- used the (now removed) register_noop_background_update method. + +-- Used to be a background update that deletes all device_inboxes for deleted +-- devices. +DELETE FROM background_updates WHERE update_name = 'remove_deleted_devices_from_device_inbox'; +-- Used to be a background update that deletes all device_inboxes for hidden +-- devices. 
+DELETE FROM background_updates WHERE update_name = 'remove_hidden_devices_from_device_inbox';
+
+-- A pair of background updates that were added during the 1.14 release cycle,
+-- but replaced with 58/06dlols_unique_idx.py
+DELETE FROM background_updates WHERE update_name = 'device_lists_outbound_last_success_unique_idx';
+DELETE FROM background_updates WHERE update_name = 'drop_device_lists_outbound_last_success_non_unique_idx';
+
+-- The event_thread_relation background update was replaced with the
+-- event_arbitrary_relations one, which handles any relation to avoid
+-- needing to potentially crawl the entire events table in the future.
+DELETE FROM background_updates WHERE update_name = 'event_thread_relation';
+
+-- A legacy groups background update.
+DELETE FROM background_updates WHERE update_name = 'local_group_updates_index';
+
+-- The original impl of _drop_media_index_without_method was broken (see
+-- https://github.com/matrix-org/synapse/issues/8649), so we replaced the original
+-- impl with a no-op and run the fixed migration as
+-- media_repository_drop_index_wo_method_2.
+DELETE FROM background_updates WHERE update_name = 'media_repository_drop_index_wo_method';
+
+-- We no longer use refresh tokens, but it's possible that some people
+-- might have a background update queued to build this index. Just
+-- clear the background update.
+DELETE FROM background_updates WHERE update_name = 'refresh_tokens_device_index';
+
+DELETE FROM background_updates WHERE update_name = 'user_threepids_grandfather';
+
+-- We used to have a background update to turn the GIN index into a
+-- GIST one; we no longer do that (obviously) because we actually want
+-- a GIN index. However, it's possible that some people might still have
+-- the background update queued, so we clear the background update here.
+DELETE FROM background_updates WHERE update_name = 'event_search_postgres_gist';
+
+-- We no longer need to perform clean-up.
+DELETE FROM background_updates WHERE update_name = 'populate_stats_cleanup'; +DELETE FROM background_updates WHERE update_name = 'populate_stats_prepare'; diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 96aaffb53c..af3bab2c15 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -546,6 +546,7 @@ class StateFilter: # the sender of a piece of state wasn't actually in the room, then clearly that # state shouldn't have been returned. # We should at least add some tests around this to see what happens. + # https://github.com/matrix-org/synapse/issues/13006 # if we haven't requested membership events, then it depends on the value of # 'include_others' diff --git a/synapse/types.py b/synapse/types.py index 0586d2cbb9..668d48d646 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -267,7 +267,6 @@ class DomainSpecificString(metaclass=abc.ABCMeta): ) domain = parts[1] - # This code will need changing if we want to support multiple domain # names on one HS return cls(localpart=parts[0], domain=domain) @@ -279,6 +278,8 @@ class DomainSpecificString(metaclass=abc.ABCMeta): @classmethod def is_valid(cls: Type[DS], s: str) -> bool: """Parses the input string and attempts to ensure it is valid.""" + # TODO: this does not reject an empty localpart or an overly-long string. + # See https://spec.matrix.org/v1.2/appendices/#identifier-grammar try: obj = cls.from_string(s) # Apply additional validation to the domain. 
This is only done diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index d8046b7553..6323d452e7 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -19,6 +19,7 @@ from typing import Any, Callable, Dict, Generator, Optional import attr from frozendict import frozendict +from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, task from twisted.internet.defer import Deferred @@ -183,3 +184,8 @@ def log_failure( if not consumeErrors: return failure return None + + +# Version string with git info. Computed here once so that we don't invoke git multiple +# times. +SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__) diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index 84e4f6ff55..df77edcce2 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -17,8 +17,14 @@ from typing import Callable, Optional +import attr import pymacaroons from pymacaroons.exceptions import MacaroonVerificationFailedException +from typing_extensions import Literal + +from synapse.util import Clock, stringutils + +MacaroonType = Literal["access", "delete_pusher", "session", "login"] def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str: @@ -86,3 +92,305 @@ def satisfy_expiry(v: pymacaroons.Verifier, get_time_ms: Callable[[], int]) -> N return time_msec < expiry v.satisfy_general(verify_expiry_caveat) + + +@attr.s(frozen=True, slots=True, auto_attribs=True) +class OidcSessionData: + """The attributes which are stored in a OIDC session cookie""" + + idp_id: str + """The Identity Provider being used""" + + nonce: str + """The `nonce` parameter passed to the OIDC provider.""" + + client_redirect_url: str + """The URL the client gave when it initiated the flow. 
("" if this is a UI Auth)""" + + ui_auth_session_id: str + """The session ID of the ongoing UI Auth ("" if this is a login)""" + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class LoginTokenAttributes: + """Data we store in a short-term login token""" + + user_id: str + + auth_provider_id: str + """The SSO Identity Provider that the user authenticated with, to get this token.""" + + auth_provider_session_id: Optional[str] + """The session ID advertised by the SSO Identity Provider.""" + + +class MacaroonGenerator: + def __init__(self, clock: Clock, location: str, secret_key: bytes): + self._clock = clock + self._location = location + self._secret_key = secret_key + + def generate_guest_access_token(self, user_id: str) -> str: + """Generate a guest access token for the given user ID + + Args: + user_id: The user ID for which the guest token should be generated. + + Returns: + A signed access token for that guest user. + """ + nonce = stringutils.random_string_with_symbols(16) + macaroon = self._generate_base_macaroon("access") + macaroon.add_first_party_caveat(f"user_id = {user_id}") + macaroon.add_first_party_caveat(f"nonce = {nonce}") + macaroon.add_first_party_caveat("guest = true") + return macaroon.serialize() + + def generate_delete_pusher_token( + self, user_id: str, app_id: str, pushkey: str + ) -> str: + """Generate a signed token used for unsubscribing from email notifications + + Args: + user_id: The user for which this token will be valid. + app_id: The app_id for this pusher. + pushkey: The unique identifier of this pusher. + + Returns: + A signed token which can be used in unsubscribe links. 
+ """ + macaroon = self._generate_base_macaroon("delete_pusher") + macaroon.add_first_party_caveat(f"user_id = {user_id}") + macaroon.add_first_party_caveat(f"app_id = {app_id}") + macaroon.add_first_party_caveat(f"pushkey = {pushkey}") + return macaroon.serialize() + + def generate_short_term_login_token( + self, + user_id: str, + auth_provider_id: str, + auth_provider_session_id: Optional[str] = None, + duration_in_ms: int = (2 * 60 * 1000), + ) -> str: + """Generate a short-term login token used during SSO logins + + Args: + user_id: The user for which the token is valid. + auth_provider_id: The SSO IdP the user used. + auth_provider_session_id: The session ID got during login from the SSO IdP. + + Returns: + A signed token valid for using as a ``m.login.token`` token. + """ + now = self._clock.time_msec() + expiry = now + duration_in_ms + macaroon = self._generate_base_macaroon("login") + macaroon.add_first_party_caveat(f"user_id = {user_id}") + macaroon.add_first_party_caveat(f"time < {expiry}") + macaroon.add_first_party_caveat(f"auth_provider_id = {auth_provider_id}") + if auth_provider_session_id is not None: + macaroon.add_first_party_caveat( + f"auth_provider_session_id = {auth_provider_session_id}" + ) + return macaroon.serialize() + + def generate_oidc_session_token( + self, + state: str, + session_data: OidcSessionData, + duration_in_ms: int = (60 * 60 * 1000), + ) -> str: + """Generates a signed token storing data about an OIDC session. + + When Synapse initiates an authorization flow, it creates a random state + and a random nonce. Those parameters are given to the provider and + should be verified when the client comes back from the provider. + It is also used to store the client_redirect_url, which is used to + complete the SSO login flow. + + Args: + state: The ``state`` parameter passed to the OIDC provider. + session_data: data to include in the session token. + duration_in_ms: An optional duration for the token in milliseconds. 
+ Defaults to an hour. + + Returns: + A signed macaroon token with the session information. + """ + now = self._clock.time_msec() + expiry = now + duration_in_ms + macaroon = self._generate_base_macaroon("session") + macaroon.add_first_party_caveat(f"state = {state}") + macaroon.add_first_party_caveat(f"idp_id = {session_data.idp_id}") + macaroon.add_first_party_caveat(f"nonce = {session_data.nonce}") + macaroon.add_first_party_caveat( + f"client_redirect_url = {session_data.client_redirect_url}" + ) + macaroon.add_first_party_caveat( + f"ui_auth_session_id = {session_data.ui_auth_session_id}" + ) + macaroon.add_first_party_caveat(f"time < {expiry}") + + return macaroon.serialize() + + def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes: + """Verify a short-term-login macaroon + + Checks that the given token is a valid, unexpired short-term-login token + minted by this server. + + Args: + token: The login token to verify. + + Returns: + A set of attributes carried by this token, including the + ``user_id`` and informations about the SSO IDP used during that + login. 
+ + Raises: + MacaroonVerificationFailedException if the verification failed + """ + macaroon = pymacaroons.Macaroon.deserialize(token) + + v = self._base_verifier("login") + v.satisfy_general(lambda c: c.startswith("user_id = ")) + v.satisfy_general(lambda c: c.startswith("auth_provider_id = ")) + v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = ")) + satisfy_expiry(v, self._clock.time_msec) + v.verify(macaroon, self._secret_key) + + user_id = get_value_from_macaroon(macaroon, "user_id") + auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id") + + auth_provider_session_id: Optional[str] = None + try: + auth_provider_session_id = get_value_from_macaroon( + macaroon, "auth_provider_session_id" + ) + except MacaroonVerificationFailedException: + pass + + return LoginTokenAttributes( + user_id=user_id, + auth_provider_id=auth_provider_id, + auth_provider_session_id=auth_provider_session_id, + ) + + def verify_guest_token(self, token: str) -> str: + """Verify a guest access token macaroon + + Checks that the given token is a valid, unexpired guest access token + minted by this server. + + Args: + token: The access token to verify. + + Returns: + The ``user_id`` that this token is valid for. + + Raises: + MacaroonVerificationFailedException if the verification failed + """ + macaroon = pymacaroons.Macaroon.deserialize(token) + user_id = get_value_from_macaroon(macaroon, "user_id") + + # At some point, Synapse would generate macaroons without the "guest" + # caveat for regular users. Because of how macaroon verification works, + # to avoid validating those as guest tokens, we explicitely verify if + # the macaroon includes the "guest = true" caveat. 
+ is_guest = any( + (caveat.caveat_id == "guest = true" for caveat in macaroon.caveats) + ) + + if not is_guest: + raise MacaroonVerificationFailedException("Macaroon is not a guest token") + + v = self._base_verifier("access") + v.satisfy_exact("guest = true") + v.satisfy_general(lambda c: c.startswith("user_id = ")) + v.satisfy_general(lambda c: c.startswith("nonce = ")) + satisfy_expiry(v, self._clock.time_msec) + v.verify(macaroon, self._secret_key) + + return user_id + + def verify_delete_pusher_token(self, token: str, app_id: str, pushkey: str) -> str: + """Verify a token from an email unsubscribe link + + Args: + token: The token to verify. + app_id: The app_id of the pusher to delete. + pushkey: The unique identifier of the pusher to delete. + + Return: + The ``user_id`` for which this token is valid. + + Raises: + MacaroonVerificationFailedException if the verification failed + """ + macaroon = pymacaroons.Macaroon.deserialize(token) + user_id = get_value_from_macaroon(macaroon, "user_id") + + v = self._base_verifier("delete_pusher") + v.satisfy_exact(f"app_id = {app_id}") + v.satisfy_exact(f"pushkey = {pushkey}") + v.satisfy_general(lambda c: c.startswith("user_id = ")) + v.verify(macaroon, self._secret_key) + + return user_id + + def verify_oidc_session_token(self, session: bytes, state: str) -> OidcSessionData: + """Verifies and extract an OIDC session token. + + This verifies that a given session token was issued by this homeserver + and extract the nonce and client_redirect_url caveats. + + Args: + session: The session token to verify + state: The state the OIDC provider gave back + + Returns: + The data extracted from the session cookie + + Raises: + KeyError if an expected caveat is missing from the macaroon. 
+ """ + macaroon = pymacaroons.Macaroon.deserialize(session) + + v = self._base_verifier("session") + v.satisfy_exact(f"state = {state}") + v.satisfy_general(lambda c: c.startswith("nonce = ")) + v.satisfy_general(lambda c: c.startswith("idp_id = ")) + v.satisfy_general(lambda c: c.startswith("client_redirect_url = ")) + v.satisfy_general(lambda c: c.startswith("ui_auth_session_id = ")) + satisfy_expiry(v, self._clock.time_msec) + + v.verify(macaroon, self._secret_key) + + # Extract the session data from the token. + nonce = get_value_from_macaroon(macaroon, "nonce") + idp_id = get_value_from_macaroon(macaroon, "idp_id") + client_redirect_url = get_value_from_macaroon(macaroon, "client_redirect_url") + ui_auth_session_id = get_value_from_macaroon(macaroon, "ui_auth_session_id") + return OidcSessionData( + nonce=nonce, + idp_id=idp_id, + client_redirect_url=client_redirect_url, + ui_auth_session_id=ui_auth_session_id, + ) + + def _generate_base_macaroon(self, type: MacaroonType) -> pymacaroons.Macaroon: + macaroon = pymacaroons.Macaroon( + location=self._location, + identifier="key", + key=self._secret_key, + ) + macaroon.add_first_party_caveat("gen = 1") + macaroon.add_first_party_caveat(f"type = {type}") + return macaroon + + def _base_verifier(self, type: MacaroonType) -> pymacaroons.Verifier: + v = pymacaroons.Verifier() + v.satisfy_exact("gen = 1") + v.satisfy_exact(f"type = {type}") + return v diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index bc75ddd3e9..dfcfaf79b6 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -19,6 +19,7 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import UserTypes from synapse.api.errors import ( AuthError, @@ -49,7 +50,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' - self.auth_blocking = self.auth._auth_blocking + self.auth_blocking = AuthBlocking(hs) self.test_user = "@foo:bar" self.test_token = b"_test_token_" @@ -312,9 +313,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(self.store.insert_client_ip.call_count, 2) def test_get_user_from_macaroon(self): - self.store.get_user_by_access_token = simple_async_mock( - TokenLookupResult(user_id="@baldrick:matrix.org", device_id="device") - ) + self.store.get_user_by_access_token = simple_async_mock(None) user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( @@ -322,17 +321,14 @@ class AuthTestCase(unittest.HomeserverTestCase): identifier="key", key=self.hs.config.key.macaroon_secret_key, ) + # "Legacy" macaroons should not work for regular users not in the database macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) - user_info = self.get_success( - self.auth.get_user_by_access_token(macaroon.serialize()) + serialized = macaroon.serialize() + self.get_failure( + self.auth.get_user_by_access_token(serialized), InvalidClientTokenError ) - self.assertEqual(user_id, user_info.user_id) - - # TODO: device_id should come from the macaroon, but currently comes - # from the db. 
- self.assertEqual(user_info.device_id, "device") def test_get_guest_user_from_macaroon(self): self.store.get_user_by_id = simple_async_mock({"is_guest": True}) @@ -362,20 +358,22 @@ class AuthTestCase(unittest.HomeserverTestCase): small_number_of_users = 1 # Ensure no error thrown - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) self.auth_blocking._limit_usage_by_mau = True self.store.get_monthly_active_count = simple_async_mock(lots_of_users) - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) # Ensure does not throw an error self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) def test_blocking_mau__depending_on_user_type(self): self.auth_blocking._max_mau_value = 50 @@ -383,15 +381,18 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_monthly_active_count = simple_async_mock(100) # Support users allowed - self.get_success(self.auth.check_auth_blocking(user_type=UserTypes.SUPPORT)) + self.get_success( + self.auth_blocking.check_auth_blocking(user_type=UserTypes.SUPPORT) + ) self.store.get_monthly_active_count = simple_async_mock(100) # Bots not allowed self.get_failure( - self.auth.check_auth_blocking(user_type=UserTypes.BOT), ResourceLimitError + self.auth_blocking.check_auth_blocking(user_type=UserTypes.BOT), + ResourceLimitError, ) self.store.get_monthly_active_count = simple_async_mock(100) # Real users not allowed - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), 
ResourceLimitError) def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -419,7 +420,7 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service=appservice, authenticated_entity="@appservice:server", ) - self.get_success(self.auth.check_auth_blocking(requester=requester)) + self.get_success(self.auth_blocking.check_auth_blocking(requester=requester)) def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -448,7 +449,8 @@ class AuthTestCase(unittest.HomeserverTestCase): authenticated_entity="@appservice:server", ) self.get_failure( - self.auth.check_auth_blocking(requester=requester), ResourceLimitError + self.auth_blocking.check_auth_blocking(requester=requester), + ResourceLimitError, ) def test_reserved_threepid(self): @@ -459,18 +461,21 @@ class AuthTestCase(unittest.HomeserverTestCase): unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.auth_blocking._mau_limits_reserved_threepids = [threepid] - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) self.get_failure( - self.auth.check_auth_blocking(threepid=unknown_threepid), ResourceLimitError + self.auth_blocking.check_auth_blocking(threepid=unknown_threepid), + ResourceLimitError, ) - self.get_success(self.auth.check_auth_blocking(threepid=threepid)) + self.get_success(self.auth_blocking.check_auth_blocking(threepid=threepid)) def test_hs_disabled(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, 
Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -485,7 +490,9 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -495,4 +502,4 @@ class AuthTestCase(unittest.HomeserverTestCase): user = "@user:server" self.auth_blocking._server_notices_mxid = user self.auth_blocking._hs_disabled_message = "Reason for being disabled" - self.get_success(self.auth.check_auth_blocking(user)) + self.get_success(self.auth_blocking.check_auth_blocking(user)) diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index e63885c1c9..d33e86db4c 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -24,7 +24,7 @@ from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableFederationServlet(BaseFederationServlet): @@ -54,9 +54,7 @@ class CancellableFederationServlet(BaseFederationServlet): return HTTPStatus.OK, {"result": True} -class BaseFederationServletCancellationTests( - unittest.FederatingHomeserverTestCase, EndpointCancellationTestHelperMixin -): +class BaseFederationServletCancellationTests(unittest.FederatingHomeserverTestCase): """Tests for `BaseFederationServlet` cancellation.""" skip = "`BaseFederationServlet` does not support cancellation yet." 
@@ -86,7 +84,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -106,7 +104,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 67a7829769..7106799d44 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -38,7 +38,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # MAU tests # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = hs.get_auth()._auth_blocking + self.auth_blocking = hs.get_auth_blocking() self.auth_blocking._max_mau_value = 50 self.small_number_of_users = 1 diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 01ea7d2a42..b8b465d35b 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -154,7 +154,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): self._record_users() # delete the device - self.get_success(self.handler.delete_device(user1, "abc")) + self.get_success(self.handler.delete_devices(user1, ["abc"])) # check the device was deleted self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError) @@ -179,7 +179,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) # delete the device - self.get_success(self.handler.delete_device(user1, "abc")) + self.get_success(self.handler.delete_devices(user1, ["abc"])) # check that the device_inbox was deleted res = self.get_success( diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 1231aed944..e6cd3af7b7 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -25,7 +25,7 @@ from synapse.handlers.sso import MappingException from 
synapse.server import HomeServer from synapse.types import JsonDict, UserID from synapse.util import Clock -from synapse.util.macaroons import get_value_from_macaroon +from synapse.util.macaroons import OidcSessionData, get_value_from_macaroon from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -1227,7 +1227,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) -> str: from synapse.handlers.oidc import OidcSessionData - return self.handler._token_generator.generate_oidc_session_token( + return self.handler._macaroon_generator.generate_oidc_session_token( state=state, session_data=OidcSessionData( idp_id="oidc", @@ -1251,7 +1251,6 @@ async def _make_callback_with_userinfo( userinfo: the OIDC userinfo dict client_redirect_url: the URL to redirect to on success. """ - from synapse.handlers.oidc import OidcSessionData handler = hs.get_oidc_handler() provider = handler._providers["oidc"] @@ -1260,7 +1259,7 @@ async def _make_callback_with_userinfo( provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] state = "state" - session = handler._token_generator.generate_oidc_session_token( + session = handler._macaroon_generator.generate_oidc_session_token( state=state, session_data=OidcSessionData( idp_id="oidc", diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index b6ba19c739..23f35d5bf5 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -699,7 +699,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): """ if localpart is None: raise SynapseError(400, "Request must include user id") - await self.hs.get_auth().check_auth_blocking() + await self.hs.get_auth_blocking().check_auth_blocking() need_register = True try: diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 0546655690..aa650756e4 100644 --- 
a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -178,7 +178,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): result_room_ids.append(result_room["room_id"]) result_children_ids.append( [ - (cs["room_id"], cs["state_key"]) + (result_room["room_id"], cs["state_key"]) for cs in result_room["children_state"] ] ) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index ecd78fa369..05f9ec3c51 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -46,16 +46,9 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.get_success( self.store.db_pool.simple_insert( "background_updates", - {"update_name": "populate_stats_prepare", "progress_json": "{}"}, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": "populate_stats_prepare", }, ) ) @@ -69,16 +62,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) async def get_all_room_state(self): return await self.store.db_pool.simple_select_list( @@ -533,7 +516,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": "populate_stats_prepare", }, ) ) @@ -547,16 +529,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) self.wait_for_background_updates() diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index db3302a4c7..ecc7cc6461 100644 --- a/tests/handlers/test_sync.py +++ 
b/tests/handlers/test_sync.py @@ -45,7 +45,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = self.hs.get_auth()._auth_blocking + self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self): user_id1 = "@user1:test" diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index b9f1a381aa..994d8880b0 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -12,89 +12,543 @@ # See the License for the specific language governing permissions and # limitations under the License. +import inspect +import itertools +import logging from http import HTTPStatus -from typing import Any, Callable, Optional, Union +from typing import ( + Any, + Callable, + ContextManager, + Dict, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, +) from unittest import mock +from unittest.mock import Mock +from twisted.internet.defer import Deferred from twisted.internet.error import ConnectionDone +from twisted.python.failure import Failure +from twisted.test.proto_helpers import MemoryReactorClock +from twisted.web.server import Site from synapse.http.server import ( HTTP_STATUS_REQUEST_CANCELLED, respond_with_html_bytes, respond_with_json, ) +from synapse.http.site import SynapseRequest +from synapse.logging.context import LoggingContext, make_deferred_yieldable from synapse.types import JsonDict -from tests import unittest -from tests.server import FakeChannel, ThreadedMemoryReactorClock +from tests.server import FakeChannel, make_request +from tests.unittest import logcontext_clean +logger = logging.getLogger(__name__) -class EndpointCancellationTestHelperMixin(unittest.TestCase): - """Provides helper methods for testing cancellation of endpoints.""" - def _test_disconnect( - self, - reactor: ThreadedMemoryReactorClock, - channel: FakeChannel, - expect_cancellation: bool, - 
expected_body: Union[bytes, JsonDict], - expected_code: Optional[int] = None, - ) -> None: - """Disconnects an in-flight request and checks the response. +T = TypeVar("T") - Args: - reactor: The twisted reactor running the request handler. - channel: The `FakeChannel` for the request. - expect_cancellation: `True` if request processing is expected to be - cancelled, `False` if the request should run to completion. - expected_body: The expected response for the request. - expected_code: The expected status code for the request. Defaults to `200` - or `499` depending on `expect_cancellation`. - """ - # Determine the expected status code. - if expected_code is None: - if expect_cancellation: - expected_code = HTTP_STATUS_REQUEST_CANCELLED - else: - expected_code = HTTPStatus.OK - - request = channel.request - self.assertFalse( - channel.is_finished(), + +def test_disconnect( + reactor: MemoryReactorClock, + channel: FakeChannel, + expect_cancellation: bool, + expected_body: Union[bytes, JsonDict], + expected_code: Optional[int] = None, +) -> None: + """Disconnects an in-flight request and checks the response. + + Args: + reactor: The twisted reactor running the request handler. + channel: The `FakeChannel` for the request. + expect_cancellation: `True` if request processing is expected to be cancelled, + `False` if the request should run to completion. + expected_body: The expected response for the request. + expected_code: The expected status code for the request. Defaults to `200` or + `499` depending on `expect_cancellation`. + """ + # Determine the expected status code. 
+ if expected_code is None: + if expect_cancellation: + expected_code = HTTP_STATUS_REQUEST_CANCELLED + else: + expected_code = HTTPStatus.OK + + request = channel.request + if channel.is_finished(): + raise AssertionError( "Request finished before we could disconnect - " - "was `await_result=False` passed to `make_request`?", + "ensure `await_result=False` is passed to `make_request`.", ) - # We're about to disconnect the request. This also disconnects the channel, so - # we have to rely on mocks to extract the response. - respond_method: Callable[..., Any] - if isinstance(expected_body, bytes): - respond_method = respond_with_html_bytes + # We're about to disconnect the request. This also disconnects the channel, so we + # have to rely on mocks to extract the response. + respond_method: Callable[..., Any] + if isinstance(expected_body, bytes): + respond_method = respond_with_html_bytes + else: + respond_method = respond_with_json + + with mock.patch( + f"synapse.http.server.{respond_method.__name__}", wraps=respond_method + ) as respond_mock: + # Disconnect the request. + request.connectionLost(reason=ConnectionDone()) + + if expect_cancellation: + # An immediate cancellation is expected. + respond_mock.assert_called_once() else: - respond_method = respond_with_json + respond_mock.assert_not_called() - with mock.patch( - f"synapse.http.server.{respond_method.__name__}", wraps=respond_method - ) as respond_mock: - # Disconnect the request. - request.connectionLost(reason=ConnectionDone()) + # The handler is expected to run to completion. + reactor.advance(1.0) + respond_mock.assert_called_once() - if expect_cancellation: - # An immediate cancellation is expected. 
- respond_mock.assert_called_once() - args, _kwargs = respond_mock.call_args - code, body = args[1], args[2] - self.assertEqual(code, expected_code) - self.assertEqual(request.code, expected_code) - self.assertEqual(body, expected_body) - else: - respond_mock.assert_not_called() - - # The handler is expected to run to completion. - reactor.pump([1.0]) + args, _kwargs = respond_mock.call_args + code, body = args[1], args[2] + + if code != expected_code: + raise AssertionError( + f"{code} != {expected_code} : " + "Request did not finish with the expected status code." + ) + + if request.code != expected_code: + raise AssertionError( + f"{request.code} != {expected_code} : " + "Request did not finish with the expected status code." + ) + + if body != expected_body: + raise AssertionError( + f"{body!r} != {expected_body!r} : " + "Request did not finish with the expected status code." + ) + + +@logcontext_clean +def make_request_with_cancellation_test( + test_name: str, + reactor: MemoryReactorClock, + site: Site, + method: str, + path: str, + content: Union[bytes, str, JsonDict] = b"", +) -> FakeChannel: + """Performs a request repeatedly, disconnecting at successive `await`s, until + one completes. + + Fails if: + * A logging context is lost during cancellation. + * A logging context get restarted after it is marked as finished, eg. if + a request's logging context is used by some processing started by the + request, but the request neglects to cancel that processing or wait for it + to complete. + + Note that "Re-starting finished log context" errors get raised within the + request handling code and may or may not get caught. These errors will + likely manifest as a different logging context error at a later point. When + debugging logging context failures, setting a breakpoint in + `logcontext_error` can prove useful. + * A request gets stuck, possibly due to a previous cancellation. + * The request does not return a 499 when the client disconnects. 
+ This implies that a `CancelledError` was swallowed somewhere. + + It is up to the caller to verify that the request returns the correct data when + it finally runs to completion. + + Note that this function can only cover a single code path and does not guarantee + that an endpoint is compatible with cancellation on every code path. + To allow inspection of the code path that is being tested, this function will + log the stack trace at every `await` that gets cancelled. To view these log + lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment + variable, which will include the log lines in `_trial_temp/test.log`. + Alternatively, `_log_for_request` can be modified to write to `sys.stdout`. + + Args: + test_name: The name of the test, which will be logged. + reactor: The twisted reactor running the request handler. + site: The twisted `Site` to use to render the request. + method: The HTTP request method ("verb"). + path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and + such). + content: The body of the request. + + Returns: + The `FakeChannel` object which stores the result of the final request that + runs to completion. + """ + # To process a request, a coroutine run is created for the async method handling + # the request. That method may then start other coroutine runs, wrapped in + # `Deferred`s. + # + # We would like to trigger a cancellation at the first `await`, re-run the + # request and cancel at the second `await`, and so on. By patching + # `Deferred.__next__`, we can intercept `await`s, track which ones we have or + # have not seen, and force them to block when they wouldn't have. + + # The set of previously seen `await`s. + # Each element is a stringified stack trace. + seen_awaits: Set[Tuple[str, ...]] = set() + + _log_for_request( + 0, f"Running make_request_with_cancellation_test for {test_name}..." 
+ ) + + for request_number in itertools.count(1): + deferred_patch = Deferred__next__Patch(seen_awaits, request_number) + + try: + with mock.patch( + "synapse.http.server.respond_with_json", wraps=respond_with_json + ) as respond_mock: + with deferred_patch.patch(): + # Start the request. + channel = make_request( + reactor, site, method, path, content, await_result=False + ) + request = channel.request + + # Run the request until we see a new `await` which we have not + # yet cancelled at, or it completes. + while not respond_mock.called and not deferred_patch.new_await_seen: + previous_awaits_seen = deferred_patch.awaits_seen + + reactor.advance(0.0) + + if deferred_patch.awaits_seen == previous_awaits_seen: + # We didn't see any progress. Try advancing the clock. + reactor.advance(1.0) + + if deferred_patch.awaits_seen == previous_awaits_seen: + # We still didn't see any progress. The request might be + # stuck. + raise AssertionError( + "Request appears to be stuck, possibly due to a " + "previous cancelled request" + ) + + if respond_mock.called: + # The request ran to completion and we are done with testing it. + + # `respond_with_json` writes the response asynchronously, so we + # might have to give the reactor a kick before the channel gets + # the response. + deferred_patch.unblock_awaits() + channel.await_result() + + return channel + + # Disconnect the client and wait for the response. + request.connectionLost(reason=ConnectionDone()) + + _log_for_request(request_number, "--- disconnected ---") + + # Advance the reactor just enough to get a response. + # We don't want to advance the reactor too far, because we can only + # detect re-starts of finished logging contexts after we set the + # finished flag below. + for _ in range(2): + # We may need to pump the reactor to allow `delay_cancellation`s to + # finish. + if not respond_mock.called: + reactor.advance(0.0) + + # Try advancing the clock if that didn't work. 
+ if not respond_mock.called: + reactor.advance(1.0) + + # `delay_cancellation`s may be waiting for processing that we've + # forced to block. Try unblocking them, followed by another round of + # pumping the reactor. + if not respond_mock.called: + deferred_patch.unblock_awaits() + + # Mark the request's logging context as finished. If it gets + # activated again, an `AssertionError` will be raised and bubble up + # through request handling code. This `AssertionError` may or may not be + # caught. Eventually some other code will deactivate the logging + # context which will raise a different `AssertionError` because + # resource usage won't have been correctly tracked. + if isinstance(request, SynapseRequest) and request.logcontext: + request.logcontext.finished = True + + # Check that the request finished with a 499, + # ie. the `CancelledError` wasn't swallowed. respond_mock.assert_called_once() - args, _kwargs = respond_mock.call_args - code, body = args[1], args[2] - self.assertEqual(code, expected_code) - self.assertEqual(request.code, expected_code) - self.assertEqual(body, expected_body) + + if request.code != HTTP_STATUS_REQUEST_CANCELLED: + raise AssertionError( + f"{request.code} != {HTTP_STATUS_REQUEST_CANCELLED} : " + "Cancelled request did not finish with the correct status code." + ) + finally: + # Unblock any processing that might be shared between requests, if we + # haven't already done so. + deferred_patch.unblock_awaits() + + assert False, "unreachable" # noqa: B011 + + +class Deferred__next__Patch: + """A `Deferred.__next__` patch that will intercept `await`s and force them + to block once it sees a new `await`. + + When done with the patch, `unblock_awaits()` must be called to clean up after any + `await`s that were forced to block, otherwise processing shared between multiple + requests, such as database queries started by `@cached`, will become permanently + stuck. 
+ + Usage: + seen_awaits = set() + deferred_patch = Deferred__next__Patch(seen_awaits, 1) + try: + with deferred_patch.patch(): + # do things + ... + finally: + deferred_patch.unblock_awaits() + """ + + def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): + """ + Args: + seen_awaits: The set of stack traces of `await`s that have been previously + seen. When the `Deferred.__next__` patch sees a new `await`, it will add + it to the set. + request_number: The request number to log against. + """ + self._request_number = request_number + self._seen_awaits = seen_awaits + + self._original_Deferred___next__ = Deferred.__next__ + + # The number of `await`s on `Deferred`s we have seen so far. + self.awaits_seen = 0 + + # Whether we have seen a new `await` not in `seen_awaits`. + self.new_await_seen = False + + # To force `await`s on resolved `Deferred`s to block, we make up a new + # unresolved `Deferred` and return it out of `Deferred.__next__` / + # `coroutine.send()`. We have to resolve it later, in case the `await`ing + # coroutine is part of some shared processing, such as `@cached`. + self._to_unblock: Dict[Deferred, Union[object, Failure]] = {} + + # The last stack we logged. + self._previous_stack: List[inspect.FrameInfo] = [] + + def patch(self) -> ContextManager[Mock]: + """Returns a context manager which patches `Deferred.__next__`.""" + + def Deferred___next__( + deferred: "Deferred[T]", value: object = None + ) -> "Deferred[T]": + """Intercepts `await`s on `Deferred`s and rigs them to block once we have + seen enough of them. + + `Deferred.__next__` will normally: + * return `self` if the `Deferred` is unresolved, in which case + `coroutine.send()` will return the `Deferred`, and + `_defer.inlineCallbacks` will stop running the coroutine until the + `Deferred` is resolved. + * raise a `StopIteration(result)`, containing the result of the `await`. + * raise another exception, which will come out of the `await`. 
+ """ + self.awaits_seen += 1 + + stack = _get_stack(skip_frames=1) + stack_hash = _hash_stack(stack) + + if stack_hash not in self._seen_awaits: + # Block at the current `await` onwards. + self._seen_awaits.add(stack_hash) + self.new_await_seen = True + + if not self.new_await_seen: + # This `await` isn't interesting. Let it proceed normally. + + # Don't log the stack. It's been seen before in a previous run. + self._previous_stack = stack + + return self._original_Deferred___next__(deferred, value) + + # We want to block at the current `await`. + if deferred.called and not deferred.paused: + # This `Deferred` already has a result. + # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait + # on. This blocks the coroutine that did this `await`. + # We queue it up for unblocking later. + new_deferred: "Deferred[T]" = Deferred() + self._to_unblock[new_deferred] = deferred.result + + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "force-blocked await", + ) + self._previous_stack = stack + + return make_deferred_yieldable(new_deferred) + + # This `Deferred` does not have a result yet. + # The `await` will block normally, so we don't have to do anything. + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "blocking await", + ) + self._previous_stack = stack + + return self._original_Deferred___next__(deferred, value) + + return mock.patch.object(Deferred, "__next__", new=Deferred___next__) + + def unblock_awaits(self) -> None: + """Unblocks any shared processing that we forced to block. + + Must be called when done, otherwise processing shared between multiple requests, + such as database queries started by `@cached`, will become permanently stuck. 
+ """ + to_unblock = self._to_unblock + self._to_unblock = {} + for deferred, result in to_unblock.items(): + deferred.callback(result) + + +def _log_for_request(request_number: int, message: str) -> None: + """Logs a message for an iteration of `make_request_with_cancellation_test`.""" + # We want consistent alignment when logging stack traces, so ensure the logging + # context has a fixed width name. + with LoggingContext(name=f"request-{request_number:<2}"): + logger.info(message) + + +def _log_await_stack( + stack: List[inspect.FrameInfo], + previous_stack: List[inspect.FrameInfo], + request_number: int, + note: str, +) -> None: + """Logs the stack for an `await` in `make_request_with_cancellation_test`. + + Only logs the part of the stack that has changed since the previous call. + + Example output looks like: + ``` + delay_cancellation:750 (synapse/util/async_helpers.py:750) + DatabasePool._runInteraction:768 (synapse/storage/database.py:768) + > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891) + ``` + + Args: + stack: The stack to log, as returned by `_get_stack()`. + previous_stack: The previous stack logged, with callers appearing before + callees. + request_number: The request number to log against. + note: A note to attach to the last stack frame, eg. "blocked on await". + """ + for i, frame_info in enumerate(stack[:-1]): + # Skip any frames in common with the previous logging. + if i < len(previous_stack) and frame_info == previous_stack[i]: + continue + + frame = _format_stack_frame(frame_info) + message = f"{' ' * i}{frame}" + _log_for_request(request_number, message) + + # Always print the final frame with the `await`. + # If the frame with the `await` started another coroutine run, we may have already + # printed a deeper stack which includes our final frame. We want to log where all + # `await`s happen, so we reprint the frame in this case. 
+ i = len(stack) - 1 + frame_info = stack[i] + frame = _format_stack_frame(frame_info) + message = f"{' ' * i}> *{note}* at {frame}" + _log_for_request(request_number, message) + + +def _format_stack_frame(frame_info: inspect.FrameInfo) -> str: + """Returns a string representation of a stack frame. + + Used for debug logging. + + Returns: + A string, formatted like + "JsonResource._async_render:559 (synapse/http/server.py:559)". + """ + method_name = _get_stack_frame_method_name(frame_info) + + return ( + f"{method_name}:{frame_info.lineno} ({frame_info.filename}:{frame_info.lineno})" + ) + + +def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]: + """Captures the stack for a request. + + Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`. + + Used for debug logging. + + Returns: + A list of `inspect.FrameInfo`s, with callers appearing before callees. + """ + stack = [] + + skip_frames += 1 # Also skip `get_stack` itself. + + for frame_info in inspect.stack()[skip_frames:]: + # Skip any twisted `inlineCallbacks` gunk. + if "/twisted/" in frame_info.filename: + continue + + # Exclude the reactor frame, upwards. + method_name = _get_stack_frame_method_name(frame_info) + if method_name == "ThreadedMemoryReactorClock.advance": + break + + stack.append(frame_info) + + # Stop at `JsonResource`'s `wrapped_async_request_handler`, which is the entry + # point for request handling. + if frame_info.function == "wrapped_async_request_handler": + break + + return stack[::-1] + + +def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str: + """Returns the name of a stack frame's method. + + eg. "JsonResource._async_render". + """ + method_name = frame_info.function + + # Prefix the class name for instance methods. 
+ frame_self = frame_info.frame.f_locals.get("self") + if frame_self: + method = getattr(frame_self, method_name, None) + if method: + method_name = method.__qualname__ + else: + # We couldn't find the method on `self`. + # Make something up. It's useful to know which class "contains" a + # function anyway. + method_name = f"{type(frame_self).__name__} {method_name}" + + return method_name + + +def _hash_stack(stack: List[inspect.FrameInfo]): + """Turns a stack into a hashable value that can be put into a set.""" + return tuple(_format_stack_frame(frame) for frame in stack) diff --git a/tests/http/test_fedclient.py b/tests/http/test_matrixfederationclient.py index 006dbab093..be9eaf34e8 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -617,3 +617,17 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertTrue(transport.disconnecting) + + def test_build_auth_headers_rejects_falsey_destinations(self) -> None: + with self.assertRaises(ValueError): + self.cl.build_auth_headers(None, b"GET", b"https://example.com") + with self.assertRaises(ValueError): + self.cl.build_auth_headers(b"", b"GET", b"https://example.com") + with self.assertRaises(ValueError): + self.cl.build_auth_headers( + None, b"GET", b"https://example.com", destination_is=b"" + ) + with self.assertRaises(ValueError): + self.cl.build_auth_headers( + b"", b"GET", b"https://example.com", destination_is=b"" + ) diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index b3655d7b44..bb966c80c6 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -30,7 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect def make_request(content): @@ -108,9 +108,7 @@ class CancellableRestServlet(RestServlet): 
return HTTPStatus.OK, {"result": True} -class TestRestServletCancellation( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class TestRestServletCancellation(unittest.HomeserverTestCase): """Tests for `RestServlet` cancellation.""" servlets = [ @@ -120,7 +118,7 @@ class TestRestServletCancellation( def test_cancellable_disconnect(self) -> None: """Test that handlers with the `@cancellable` flag can be cancelled.""" channel = self.make_request("GET", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -130,7 +128,7 @@ class TestRestServletCancellation( def test_uncancellable_disconnect(self) -> None: """Test that handlers without the `@cancellable` flag cannot be cancelled.""" channel = self.make_request("POST", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index a5ab093a27..822a957c3a 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -25,7 +25,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableReplicationEndpoint(ReplicationEndpoint): @@ -69,9 +69,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): return HTTPStatus.OK, {"result": True} -class ReplicationEndpointCancellationTestCase( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class ReplicationEndpointCancellationTestCase(unittest.HomeserverTestCase): """Tests for `ReplicationEndpoint` cancellation.""" def create_test_resource(self): @@ -87,7 +85,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers with the `@cancellable` flag can be cancelled.""" path = 
f"{REPLICATION_PREFIX}/{CancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -98,7 +96,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers without the `@cancellable` flag cannot be cancelled.""" path = f"{REPLICATION_PREFIX}/{UncancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 77c3ced42e..29bed0e872 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -13,6 +13,8 @@ # limitations under the License. """Tests REST events for /profile paths.""" +import urllib.parse +from http import HTTPStatus from typing import Any, Dict, Optional from twisted.test.proto_helpers import MemoryReactor @@ -49,6 +51,12 @@ class ProfileTestCase(unittest.HomeserverTestCase): res = self._get_displayname() self.assertEqual(res, "owner") + def test_get_displayname_rejects_bad_username(self) -> None: + channel = self.make_request( + "GET", f"/profile/{urllib.parse.quote('@alice:')}/displayname" + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + def test_set_displayname(self) -> None: channel = self.make_request( "PUT", diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 62e4db23ef..aa84906548 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -728,6 +728,7 @@ class RelationsTestCase(BaseRelationsTestCase): class RelationPaginationTestCase(BaseRelationsTestCase): + @unittest.override_config({"experimental_features": {"msc3715_enabled": True}}) def test_basic_paginate_relations(self) -> None: """Tests that calling pagination API correctly the latest 
relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index f523d89b8f..35c59ee9e0 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,10 +18,13 @@ """Tests REST events for /rooms paths.""" import json -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Union from unittest.mock import Mock, call from urllib import parse as urlparse +# `Literal` appears with Python 3.8. +from typing_extensions import Literal + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -42,6 +45,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest +from tests.http.server._base import make_request_with_cancellation_test from tests.test_utils import make_awaitable PATH_PREFIX = b"/_matrix/client/api/v1" @@ -471,6 +475,49 @@ class RoomPermissionsTestCase(RoomBase): ) +class RoomStateTestCase(RoomBase): + """Tests /rooms/$room_id/state.""" + + user_id = "@sid1:red" + + def test_get_state_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/state` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + "test_state_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/state" % room_id, + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertCountEqual( + [state_event["type"] for state_event in channel.json_body], + { + "m.room.create", + "m.room.power_levels", + "m.room.join_rules", + "m.room.member", + "m.room.history_visibility", + }, + ) + + def test_get_state_event_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/state/$event_type` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + 
"test_state_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id), + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(channel.json_body, {"membership": "join"}) + + class RoomsMemberListTestCase(RoomBase): """Tests /rooms/$room_id/members/list REST events.""" @@ -591,6 +638,62 @@ class RoomsMemberListTestCase(RoomBase): channel = self.make_request("GET", room_path) self.assertEqual(200, channel.code, msg=channel.result["body"]) + def test_get_member_list_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/members` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + "test_get_member_list_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/members" % room_id, + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertLessEqual( + { + "content": {"membership": "join"}, + "room_id": room_id, + "sender": self.user_id, + "state_key": self.user_id, + "type": "m.room.member", + "user_id": self.user_id, + }.items(), + channel.json_body["chunk"][0].items(), + ) + + def test_get_member_list_with_at_token_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/members?at=<sync token>` request.""" + room_id = self.helper.create_room_as(self.user_id) + + # first sync to get an at token + channel = self.make_request("GET", "/sync") + self.assertEqual(200, channel.code) + sync_token = channel.json_body["next_batch"] + + channel = make_request_with_cancellation_test( + "test_get_member_list_with_at_token_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/members?at=%s" % (room_id, sync_token), + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertLessEqual( + { + "content": {"membership": 
"join"}, + "room_id": room_id, + "sender": self.user_id, + "state_key": self.user_id, + "type": "m.room.member", + "user_id": self.user_id, + }.items(), + channel.json_body["chunk"][0].items(), + ) + class RoomsCreateTestCase(RoomBase): """Tests /rooms and /rooms/$room_id REST events.""" @@ -677,9 +780,11 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. + + In this test, we use the deprecated API in which callbacks return a bool. """ async def user_may_join_room( @@ -701,6 +806,32 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(join_mock.call_count, 0) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly bypassed + when creating a new room. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. 
+ """ + + async def user_may_join_room( + mxid: str, + room_id: str, + is_invite: bool, + ) -> Codes: + return Codes.CONSENT_NOT_GIVEN + + join_mock = Mock(side_effect=user_may_join_room) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + + channel = self.make_request( + "POST", + "/createRoom", + {}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertEqual(join_mock.call_count, 0) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" @@ -911,9 +1042,11 @@ class RoomJoinTestCase(RoomBase): self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called and blocks room joins when needed. + + This test uses the deprecated API, in which callbacks return booleans. """ # Register a dummy callback. Make it allow all room joins for now. @@ -926,6 +1059,8 @@ class RoomJoinTestCase(RoomBase): ) -> bool: return return_value + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) @@ -968,6 +1103,67 @@ class RoomJoinTestCase(RoomBase): return_value = False self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly called + and blocks room joins when needed. + + This test uses the latest API to this day, in which callbacks return `NOT_SPAM` or `Codes`. + """ + + # Register a dummy callback. Make it allow all room joins for now. 
+ return_value: Union[Literal["NOT_SPAM"], Codes] = synapse.module_api.NOT_SPAM + + async def user_may_join_room( + userid: str, + room_id: str, + is_invited: bool, + ) -> Union[Literal["NOT_SPAM"], Codes]: + return return_value + + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + + # Join a first room, without being invited to it. + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room1, + False, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Join a second room, this time with an invite for it. + self.helper.invite(self.room2, self.user1, self.user2, tok=self.tok1) + self.helper.join(self.room2, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room2, + True, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Now make the callback deny all room joins, and check that a join actually fails. + return_value = Codes.CONSENT_NOT_GIVEN + self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + class RoomJoinRatelimitTestCase(RoomBase): user_id = "@sid1:red" @@ -2845,9 +3041,14 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_threepid_invite_spamcheck(self) -> None: + def test_threepid_invite_spamcheck_deprecated(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. 
+ + In this test, we use the deprecated API in which callbacks return a bool. + """ # Mock a few functions to prevent the test from failing due to failing to talk to - # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock @@ -2901,3 +3102,67 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() + + def test_threepid_invite_spamcheck(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.""" + # Mock a few functions to prevent the test from failing due to failing to talk to + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we + # can check its call_count later on during the test. + make_invite_mock = Mock(return_value=make_awaitable(0)) + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock + self.hs.get_identity_handler().lookup_3pid = Mock( + return_value=make_awaitable(None), + ) + + # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it + # allow everything for now. + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + mock = Mock( + return_value=make_awaitable(synapse.module_api.NOT_SPAM), + spec=lambda *x: None, + ) + self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + + # Send a 3PID invite into the room and check that it succeeded. 
+ email_to_invite = "teresa@example.com" + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Check that the callback was called with the right params. + mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) + + # Check that the call to send the invite was made. + make_invite_mock.assert_called_once() + + # Now change the return value of the callback to deny any invite and test that + # we can't send the invite. + mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 403) + + # Also check that it stopped before calling _make_and_store_3pid_invite. 
+ make_invite_mock.assert_called_once() diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 07e29788e5..e07ae78fc4 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -96,7 +96,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) @@ -112,7 +114,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -132,7 +134,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, but should have one """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -145,7 +147,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -156,7 +160,9 @@ class 
TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(None) ) @@ -170,7 +176,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -185,7 +191,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -202,7 +208,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 8370a27195..78b83d97b6 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -13,7 +13,17 @@ # limitations under the License. 
import itertools -from typing import List +from typing import ( + Collection, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + TypeVar, +) import attr @@ -22,13 +32,13 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.event_auth import auth_types_for_event -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.state.v2 import ( _get_auth_chain_difference, lexicographical_topological_sort, resolve_events_with_store, ) -from synapse.types import EventID +from synapse.types import EventID, StateMap from tests import unittest @@ -48,7 +58,7 @@ ORIGIN_SERVER_TS = 0 class FakeClock: - def sleep(self, msec): + def sleep(self, msec: float) -> "defer.Deferred[None]": return defer.succeed(None) @@ -60,7 +70,14 @@ class FakeEvent: as domain. """ - def __init__(self, id, sender, type, state_key, content): + def __init__( + self, + id: str, + sender: str, + type: str, + state_key: Optional[str], + content: Mapping[str, object], + ): self.node_id = id self.event_id = EventID(id, "example.com").to_string() self.sender = sender @@ -69,12 +86,12 @@ class FakeEvent: self.content = content self.room_id = ROOM_ID - def to_event(self, auth_events, prev_events): + def to_event(self, auth_events: List[str], prev_events: List[str]) -> EventBase: """Given the auth_events and prev_events, convert to a Frozen Event Args: - auth_events (list[str]): list of event_ids - prev_events (list[str]): list of event_ids + auth_events: list of event_ids + prev_events: list of event_ids Returns: FrozenEvent @@ -164,7 +181,7 @@ INITIAL_EDGES = ["START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] class StateTestCase(unittest.TestCase): - def test_ban_vs_pl(self): + def test_ban_vs_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -202,7 +219,7 @@ class StateTestCase(unittest.TestCase): 
self.do_check(events, edges, expected_state_ids) - def test_join_rule_evasion(self): + def test_join_rule_evasion(self) -> None: events = [ FakeEvent( id="JR", @@ -226,7 +243,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_offtopic_pl(self): + def test_offtopic_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -257,7 +274,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_basic(self): + def test_topic_basic(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -297,7 +314,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_reset(self): + def test_topic_reset(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -327,7 +344,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic(self): + def test_topic(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -380,7 +397,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_mainline_sort(self): + def test_mainline_sort(self) -> None: """Tests that the mainline ordering works correctly.""" events = [ @@ -434,22 +451,26 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def do_check(self, events, edges, expected_state_ids): + def do_check( + self, + events: List[FakeEvent], + edges: List[List[str]], + expected_state_ids: List[str], + ) -> None: """Take a list of events and edges and calculate the state of the graph at END, and asserts it matches `expected_state_ids` Args: - events (list[FakeEvent]) - edges (list[list[str]]): A list of chains of event edges, e.g. + events + edges: A list of chains of event edges, e.g. 
`[[A, B, C]]` are edges A->B and B->C. - expected_state_ids (list[str]): The expected state at END, (excluding + expected_state_ids: The expected state at END, (excluding the keys that haven't changed since START). """ # We want to sort the events into topological order for processing. - graph = {} + graph: Dict[str, Set[str]] = {} - # node_id -> FakeEvent - fake_event_map = {} + fake_event_map: Dict[str, FakeEvent] = {} for ev in itertools.chain(INITIAL_EVENTS, events): graph[ev.node_id] = set() @@ -462,10 +483,8 @@ class StateTestCase(unittest.TestCase): for a, b in pairwise(edge_list): graph[a].add(b) - # event_id -> FrozenEvent - event_map = {} - # node_id -> state - state_at_event = {} + event_map: Dict[str, EventBase] = {} + state_at_event: Dict[str, StateMap[str]] = {} # We copy the map as the sort consumes the graph graph_copy = {k: set(v) for k, v in graph.items()} @@ -496,7 +515,16 @@ class StateTestCase(unittest.TestCase): if fake_event.state_key is not None: state_after[(fake_event.type, fake_event.state_key)] = event_id - auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) + # This type ignore is a bit sad. Things we have tried: + # 1. Define a `GenericEvent` Protocol satisfied by FakeEvent, EventBase and + # EventBuilder. But this is Hard because the relevant attributes are + # DictProperty[T] descriptors on EventBase but normal Ts on FakeEvent. + # 2. Define a `GenericEvent` Protocol describing `FakeEvent` only, and + # change this function to accept Union[Event, EventBase, EventBuilder]. + # This seems reasonable to me, but mypy isn't happy. I think that's + # a mypy bug, see https://github.com/python/mypy/issues/5570 + # Instead, resort to a type-ignore. 
+ auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) # type: ignore[arg-type] auth_events = [] for key in auth_types: @@ -530,8 +558,14 @@ class StateTestCase(unittest.TestCase): class LexicographicalTestCase(unittest.TestCase): - def test_simple(self): - graph = {"l": {"o"}, "m": {"n", "o"}, "n": {"o"}, "o": set(), "p": {"o"}} + def test_simple(self) -> None: + graph: Dict[str, Set[str]] = { + "l": {"o"}, + "m": {"n", "o"}, + "n": {"o"}, + "o": set(), + "p": {"o"}, + } res = list(lexicographical_topological_sort(graph, key=lambda x: x)) @@ -539,7 +573,7 @@ class LexicographicalTestCase(unittest.TestCase): class SimpleParamStateTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: # We build up a simple DAG. event_map = {} @@ -627,7 +661,7 @@ class SimpleParamStateTestCase(unittest.TestCase): ] } - def test_event_map_none(self): + def test_event_map_none(self) -> None: # Test that we correctly handle passing `None` as the event_map state_d = resolve_events_with_store( @@ -649,7 +683,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): events. 
""" - def test_simple(self): + def test_simple(self) -> None: # Test getting the auth difference for a simple chain with a single # unpersisted event: # @@ -695,7 +729,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {c.event_id}) - def test_multiple_unpersisted_chain(self): + def test_multiple_unpersisted_chain(self) -> None: # Test getting the auth difference for a simple chain with multiple # unpersisted events: # @@ -752,7 +786,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, c.event_id}) - def test_unpersisted_events_different_sets(self): + def test_unpersisted_events_different_sets(self) -> None: # Test getting the auth difference for with multiple unpersisted events # in different branches: # @@ -820,7 +854,10 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, e.event_id}) -def pairwise(iterable): +T = TypeVar("T") + + +def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) @@ -829,24 +866,26 @@ def pairwise(iterable): @attr.s class TestStateResolutionStore: - event_map = attr.ib() + event_map: Dict[str, EventBase] = attr.ib() - def get_events(self, event_ids, allow_rejected=False): + def get_events( + self, event_ids: Collection[str], allow_rejected: bool = False + ) -> "defer.Deferred[Dict[str, EventBase]]": """Get events from the database Args: - event_ids (list): The event_ids of the events to fetch - allow_rejected (bool): If True return rejected events. + event_ids: The event_ids of the events to fetch + allow_rejected: If True return rejected events. Returns: - Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + Dict from event_id to event. 
""" return defer.succeed( {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} ) - def _get_auth_chain(self, event_ids: List[str]) -> List[str]: + def _get_auth_chain(self, event_ids: Iterable[str]) -> List[str]: """Gets the full auth chain for a set of events (including rejected events). @@ -880,7 +919,9 @@ class TestStateResolutionStore: return list(result) - def get_auth_chain_difference(self, room_id, auth_sets): + def get_auth_chain_difference( + self, room_id: str, auth_sets: List[Set[str]] + ) -> "defer.Deferred[Set[str]]": chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] common = set(chains[0]).intersection(*chains[1:]) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e2c506e5a4..229ecd84a6 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -15,10 +15,12 @@ import unittest from typing import Optional +from parameterized import parameterized + from synapse import event_auth from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.types import JsonDict, get_domain_from_id @@ -30,38 +32,39 @@ class EventAuthTestCase(unittest.TestCase): """ creator = "@creator:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V9, creator), + _join_event(RoomVersions.V9, creator), ] # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... 
but a rejected join_rules event should cause it to be rejected - rejected_join_rules = _join_rules_event(creator, "public") + rejected_join_rules = _join_rules_event( + RoomVersions.V9, + creator, + "public", + ) rejected_join_rules.rejected_reason = "stinky" auth_events.append(rejected_join_rules) self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... even if there is *also* a good join rules - auth_events.append(_join_rules_event(creator, "public")) + auth_events.append(_join_rules_event(RoomVersions.V9, creator, "public")) self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -73,15 +76,14 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" joiner = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), - _join_event(joiner), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, joiner), ] # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _random_state_event(creator), + _random_state_event(RoomVersions.V1, creator), auth_events, ) @@ -89,8 +91,7 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, - _random_state_event(joiner), + _random_state_event(RoomVersions.V1, joiner), auth_events, ) @@ -104,28 +105,28 @@ class EventAuthTestCase(unittest.TestCase): king = "@joiner2:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "29", king: "30"}} + RoomVersions.V1, + creator, + 
{"state_default": "30", "users": {pleb: "29", king: "30"}}, ), - _join_event(pleb), - _join_event(king), + _join_event(RoomVersions.V1, pleb), + _join_event(RoomVersions.V1, king), ] # pleb should not be able to send state self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, - _random_state_event(pleb), + _random_state_event(RoomVersions.V1, pleb), auth_events, ), # king should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _random_state_event(king), + _random_state_event(RoomVersions.V1, king), auth_events, ) @@ -134,37 +135,33 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator), + _alias_event(RoomVersions.V1, creator), auth_events, ) # Reject an event with no state key. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V1, creator, state_key=""), auth_events, ) # If the domain of the sender does not match the state key, reject. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V1, creator, state_key="test.com"), auth_events, ) # Note that the member does *not* need to be in the room. 
event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(other), + _alias_event(RoomVersions.V1, other), auth_events, ) @@ -173,38 +170,35 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V6, creator), + _join_event(RoomVersions.V6, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator), + _alias_event(RoomVersions.V6, creator), auth_events, ) # No particular checks are done on the state key. event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V6, creator, state_key=""), auth_events, ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V6, creator, state_key="test.com"), auth_events, ) # Per standard auth rules, the member must be in the room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(other), + _alias_event(RoomVersions.V6, other), auth_events, ) - def test_msc2209(self): + @parameterized.expand([(RoomVersions.V1, True), (RoomVersions.V6, False)]) + def test_notifications(self, room_version: RoomVersion, allow_modification: bool): """ Notifications power levels get checked due to MSC2209. """ @@ -212,28 +206,26 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(room_version, creator), + _join_event(room_version, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "30"}} + room_version, creator, {"state_default": "30", "users": {pleb: "30"}} ), - _join_event(pleb), + _join_event(room_version, pleb), ] - # pleb should be able to modify the notifications power level. 
- event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, + pl_event = _power_levels_event( + room_version, pleb, {"notifications": {"room": 100}} ) - # But an MSC2209 room rejects this change. - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, - ) + # on room V1, pleb should be able to modify the notifications power level. + if allow_modification: + event_auth.check_auth_rules_for_event(pl_event, auth_events) + + else: + # But an MSC2209 room rejects this change. + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event(pl_event, auth_events) def test_join_rules_public(self): """ @@ -243,58 +235,60 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "public"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "public" + ), } # Check join. event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. 
- auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -306,64 +300,88 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "invite"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "invite" + ), } # A join without an invite is rejected. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. 
auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) - def test_join_rules_msc3083_restricted(self): + def test_join_rules_restricted_old_room(self) -> None: + """Old room versions should reject joins to restricted rooms""" + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V6, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "restricted" + ), + } + + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event( + _join_event(RoomVersions.V6, pleb), + auth_events.values(), + ) + + def test_join_rules_msc3083_restricted(self) -> None: """ Test joining a restricted room from MSC3083. 
@@ -377,29 +395,25 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.power_levels", ""): _power_levels_event(creator, {"invite": 0}), - ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"), + ("m.room.create", ""): _create_event(RoomVersions.V8, creator), + ("m.room.member", creator): _join_event(RoomVersions.V8, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V8, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V8, creator, "restricted" + ), } - # Older room versions don't understand this join rule - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), - auth_events.values(), - ) - # A properly formatted join event should work. authorised_join_event = _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) @@ -408,14 +422,16 @@ class EventAuthTestCase(unittest.TestCase): # are done properly). pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@inviter:foo.test": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@inviter:foo.test": 150}}, ) pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( - "@inviter:foo.test" + RoomVersions.V8, "@inviter:foo.test" ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@inviter:foo.test" @@ -427,20 +443,21 @@ class EventAuthTestCase(unittest.TestCase): # A join which is missing an authorised server is rejected. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # An join authorised by a user who is not in the room is rejected. pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@other:example.com": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@other:example.com": 150}}, ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@other:example.com" @@ -453,8 +470,8 @@ class EventAuthTestCase(unittest.TestCase): # *would* be valid, but is sent be a different user.) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _member_event( + RoomVersions.V8, pleb, "join", sender=creator, @@ -466,39 +483,41 @@ class EventAuthTestCase(unittest.TestCase): ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "leave" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user can send a join if they're in the room. (This doesn't need to # be authorised since the user is already joined.) 
- auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # A user can accept an invite. (This doesn't need to be authorised since # the user was invited.) auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V8, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -508,20 +527,25 @@ class EventAuthTestCase(unittest.TestCase): TEST_ROOM_ID = "!test:room" -def _create_event(user_id: str) -> EventBase: +def _create_event( + room_version: RoomVersion, + user_id: str, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.create", "state_key": "", "sender": user_id, "content": {"creator": user_id}, - } + }, + room_version=room_version, ) def _member_event( + room_version: RoomVersion, user_id: str, membership: str, sender: Optional[str] = None, @@ -530,79 +554,102 @@ def _member_event( return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.member", "sender": sender or user_id, "state_key": user_id, "content": {"membership": membership, **(additional_content or {})}, "prev_events": [], - } + }, + room_version=room_version, ) -def _join_event(user_id: str, additional_content: Optional[dict] = None) -> EventBase: - return _member_event(user_id, "join", additional_content=additional_content) +def _join_event( + room_version: RoomVersion, + user_id: str, + additional_content: Optional[dict] = None, +) -> EventBase: + return 
_member_event( + room_version, + user_id, + "join", + additional_content=additional_content, + ) -def _power_levels_event(sender: str, content: JsonDict) -> EventBase: +def _power_levels_event( + room_version: RoomVersion, + sender: str, + content: JsonDict, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.power_levels", "sender": sender, "state_key": "", "content": content, - } + }, + room_version=room_version, ) -def _alias_event(sender: str, **kwargs) -> EventBase: +def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: data = { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.aliases", "sender": sender, "state_key": get_domain_from_id(sender), "content": {"aliases": []}, } data.update(**kwargs) - return make_event_from_dict(data) + return make_event_from_dict(data, room_version=room_version) -def _random_state_event(sender: str) -> EventBase: +def _random_state_event(room_version: RoomVersion, sender: str) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "test.state", "sender": sender, "state_key": "", "content": {"membership": "join"}, - } + }, + room_version=room_version, ) -def _join_rules_event(sender: str, join_rule: str) -> EventBase: +def _join_rules_event( + room_version: RoomVersion, sender: str, join_rule: str +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.join_rules", "sender": sender, "state_key": "", "content": { "join_rule": join_rule, }, - } + }, + room_version=room_version, ) event_count = 0 -def _get_event_id() -> str: +def 
_maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict: + """If this room version needs it, generate an event id""" + if room_version.event_format != EventFormatVersions.V1: + return {} + global event_count c = event_count event_count += 1 - return "!%i:example.com" % (c,) + return {"event_id": "!%i:example.com" % (c,)} diff --git a/tests/test_server.py b/tests/test_server.py index 0f1eb43cbc..847432f791 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -34,7 +34,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect from tests.server import ( FakeSite, ThreadedMemoryReactorClock, @@ -407,7 +407,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource): return HTTPStatus.OK, b"ok" -class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMixin): +class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" def setUp(self): @@ -421,7 +421,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "GET", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -433,7 +433,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "POST", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, @@ -441,7 +441,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix ) -class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMixin): +class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` 
cancellation.""" def setUp(self): @@ -455,7 +455,7 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "GET", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -467,6 +467,6 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "POST", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, expected_body=b"ok" ) diff --git a/tests/test_state.py b/tests/test_state.py index 95f81bebae..b005dd8d0f 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Dict, List, Optional +from typing import Collection, Dict, List, Optional, cast from unittest.mock import Mock from twisted.internet import defer @@ -22,6 +22,8 @@ from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler +from synapse.util import Clock +from synapse.util.macaroons import MacaroonGenerator from tests import unittest @@ -190,13 +192,18 @@ class StateTestCase(unittest.TestCase): "get_clock", "get_state_resolution_handler", "get_account_validity_handler", + "get_macaroon_generator", "hostname", ] ) + clock = cast(Clock, MockClock()) hs.config = default_config("tesths", True) hs.get_datastores.return_value = Mock(main=self.dummy_store) hs.get_state_handler.return_value = None - hs.get_clock.return_value = MockClock() + hs.get_clock.return_value = clock + hs.get_macaroon_generator.return_value = MacaroonGenerator( + clock, 
"tesths", b"verysecret" + ) hs.get_auth.return_value = Auth(hs) hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs) hs.get_storage_controllers.return_value = storage_controllers diff --git a/tests/test_types.py b/tests/test_types.py index 0b10dae848..d8d82a517e 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -26,10 +26,21 @@ class UserIDTestCase(unittest.HomeserverTestCase): self.assertEqual("test", user.domain) self.assertEqual(True, self.hs.is_mine(user)) - def test_pase_empty(self): + def test_parse_rejects_empty_id(self): with self.assertRaises(SynapseError): UserID.from_string("") + def test_parse_rejects_missing_sigil(self): + with self.assertRaises(SynapseError): + UserID.from_string("alice:example.com") + + def test_parse_rejects_missing_separator(self): + with self.assertRaises(SynapseError): + UserID.from_string("@alice.example.com") + + def test_validation_rejects_missing_domain(self): + self.assertFalse(UserID.is_valid("@alice:")) + def test_build(self): user = UserID("5678efgh", "my.domain") diff --git a/tests/unittest.py b/tests/unittest.py index e7f255b4fa..c645dd3563 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -315,7 +315,7 @@ class HomeserverTestCase(TestCase): "is_guest": False, } - async def get_user_by_req(request, allow_guest=False, rights="access"): + async def get_user_by_req(request, allow_guest=False): assert self.helper.auth_user_id is not None return create_requester( UserID.from_string(self.helper.auth_user_id), diff --git a/tests/util/test_macaroons.py b/tests/util/test_macaroons.py new file mode 100644 index 0000000000..32125f7bb7 --- /dev/null +++ b/tests/util/test_macaroons.py @@ -0,0 +1,146 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pymacaroons.exceptions import MacaroonVerificationFailedException + +from synapse.util.macaroons import MacaroonGenerator, OidcSessionData + +from tests.server import get_clock +from tests.unittest import TestCase + + +class MacaroonGeneratorTestCase(TestCase): + def setUp(self): + self.reactor, hs_clock = get_clock() + self.macaroon_generator = MacaroonGenerator(hs_clock, "tesths", b"verysecret") + self.other_macaroon_generator = MacaroonGenerator( + hs_clock, "tesths", b"anothersecretkey" + ) + + def test_guest_access_token(self): + """Test the generation and verification of guest access tokens""" + token = self.macaroon_generator.generate_guest_access_token("@user:tesths") + user_id = self.macaroon_generator.verify_guest_token(token) + self.assertEqual(user_id, "@user:tesths") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_guest_token(token) + + # Check that an old access token without the guest caveat does not work + macaroon = self.macaroon_generator._generate_base_macaroon("access") + macaroon.add_first_party_caveat(f"user_id = {user_id}") + macaroon.add_first_party_caveat("nonce = 0123456789abcdef") + token = macaroon.serialize() + + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_guest_token(token) + + def test_delete_pusher_token(self): + """Test the generation and verification of delete_pusher tokens""" + token = self.macaroon_generator.generate_delete_pusher_token( + "@user:tesths", "m.mail", 
"john@example.com" + ) + user_id = self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + self.assertEqual(user_id, "@user:tesths") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + + # Raises when verifying for another pushkey + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "other@example.com" + ) + + # Raises when verifying for another app_id + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_delete_pusher_token( + token, "somethingelse", "john@example.com" + ) + + # Check that an old token without the app_id and pushkey still works + macaroon = self.macaroon_generator._generate_base_macaroon("delete_pusher") + macaroon.add_first_party_caveat("user_id = @user:tesths") + token = macaroon.serialize() + user_id = self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + self.assertEqual(user_id, "@user:tesths") + + def test_short_term_login_token(self): + """Test the generation and verification of short-term login tokens""" + token = self.macaroon_generator.generate_short_term_login_token( + user_id="@user:tesths", + auth_provider_id="oidc", + auth_provider_session_id="sid", + duration_in_ms=2 * 60 * 1000, + ) + + info = self.macaroon_generator.verify_short_term_login_token(token) + self.assertEqual(info.user_id, "@user:tesths") + self.assertEqual(info.auth_provider_id, "oidc") + self.assertEqual(info.auth_provider_session_id, "sid") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_short_term_login_token(token) + + # Wait a minute + self.reactor.pump([60]) + # Shouldn't raise + 
self.macaroon_generator.verify_short_term_login_token(token) + # Wait another minute + self.reactor.pump([60]) + # Should raise since it expired + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_short_term_login_token(token) + + def test_oidc_session_token(self): + """Test the generation and verification of OIDC session cookies""" + state = "arandomstate" + session_data = OidcSessionData( + idp_id="oidc", + nonce="nonce", + client_redirect_url="https://example.com/", + ui_auth_session_id="", + ) + token = self.macaroon_generator.generate_oidc_session_token( + state, session_data, duration_in_ms=2 * 60 * 1000 + ).encode("utf-8") + info = self.macaroon_generator.verify_oidc_session_token(token, state) + self.assertEqual(session_data, info) + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_oidc_session_token(token, state) + + # Should raise with another state + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_oidc_session_token(token, "anotherstate") + + # Wait a minute + self.reactor.pump([60]) + # Shouldn't raise + self.macaroon_generator.verify_oidc_session_token(token, state) + # Wait another minute + self.reactor.pump([60]) + # Should raise since it expired + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_oidc_session_token(token, state) |