136 files changed, 2614 insertions, 1376 deletions
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md deleted file mode 100644 index 978b699886..0000000000 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -<!-- - -**THIS IS NOT A SUPPORT CHANNEL!** -**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, -please ask in **#synapse:matrix.org** (using a matrix.org account if necessary) - -If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/ - -This is a bug report template. By following the instructions below and -filling out the sections with your information, you will help the us to get all -the necessary data to fix your issue. - -You can also preview your report before submitting it. You may remove sections -that aren't relevant to your particular case. - -Text between <!-- and --​> marks will be invisible in the report. - ---> - -### Description - -<!-- Describe here the problem that you are experiencing --> - -### Steps to reproduce - -- list the steps -- that reproduce the bug -- using hyphens as bullet points - -<!-- -Describe how what happens differs from what you expected. - -If you can identify any relevant log snippets from _homeserver.log_, please include -those (please be careful to remove any personal or private data). Please surround them with -``` (three backticks, on a line on their own), so that they are formatted legibly. ---> - -### Version information - -<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem --> - -<!-- Was this issue identified on matrix.org or another homeserver? --> -- **Homeserver**: - -If not matrix.org: - -<!-- - What version of Synapse is running? - -You can find the Synapse version with this command: - -$ curl http://localhost:8008/_synapse/admin/v1/server_version - -(You may need to replace `localhost:8008` if Synapse is not configured to -listen on that port.) ---> -- **Version**: - -- **Install method**: -<!-- examples: package manager/git clone/pip --> - -- **Platform**: -<!-- -Tell us about the environment in which your homeserver is operating -distro, hardware, if it's running in a vm/container, etc. ---> diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 0000000000..1b304198bc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,103 @@ +name: Bug report +description: Create a report to help us improve +body: + - type: markdown + attributes: + value: | + **THIS IS NOT A SUPPORT CHANNEL!** + **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary). + + If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/ + + This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue. + + You can also preview your report before submitting it. + - type: textarea + id: description + attributes: + label: Description + description: Describe the problem that you are experiencing + validations: + required: true + - type: textarea + id: reproduction_steps + attributes: + label: Steps to reproduce + description: | + Describe the series of steps that leads you to the problem.
+ + Describe how what happens differs from what you expected. + placeholder: Tell us what you see! + value: | + - list the steps + - that reproduce the bug + - using hyphens as bullet points + validations: + required: true + - type: markdown + attributes: + value: | + --- + + **IMPORTANT**: please answer the following questions, to help us narrow down the problem. + - type: input + id: homeserver + attributes: + label: Homeserver + description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc) + validations: + required: true + - type: input + id: version + attributes: + label: Synapse Version + description: | + What version of Synapse is this homeserver running? + + You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version + + or with this command: + + ``` + $ curl http://localhost:8008/_synapse/admin/v1/server_version + ``` + + (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.) + validations: + required: true + - type: dropdown + id: install_method + attributes: + label: Installation Method + options: + - Docker (matrixdotorg/synapse) + - Debian packages from packages.matrix.org + - pip (from PyPI) + - Other (please mention below) + - type: textarea + id: platform + attributes: + label: Platform + description: | + Tell us about the environment in which your homeserver is operating... + e.g. distro, hardware, if it's running in a vm/container, etc. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: | + Please copy and paste any relevant log output, ideally at INFO or DEBUG log level. + This will be automatically formatted into code, so there is no need for backticks. + + Please be careful to remove any personal or private data. + + **Bug reports are usually very difficult to diagnose without logging.** + render: shell + validations: + required: true + - type: textarea + id: anything_else + attributes: + label: Anything else that would be useful to know? diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 83ab727378..0b70ffc643 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -310,6 +310,16 @@ jobs: needs: linting-done runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - arrangement: monolith + database: SQLite + + - arrangement: monolith + database: Postgres + steps: # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path @@ -337,7 +347,7 @@ jobs: - run: | set -o pipefail - COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + POSTGRES=${{ (matrix.database == 'Postgres') && 1 }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt shell: bash name: Run Complement Tests diff --git a/changelog.d/12674.misc b/changelog.d/12674.misc new file mode 100644 index 0000000000..c8a8f32f0a --- /dev/null +++ b/changelog.d/12674.misc @@ -0,0 +1 @@ +Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. 
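The workflow change above drives the new Complement matrix through a single `POSTGRES` environment variable. As a rough sketch of the translation a launcher script can perform (the `PASS_` prefix and `SYNAPSE_COMPLEMENT_DATABASE` name follow the Complement image README later in this diff; the exact logic of `scripts-dev/complement.sh` may differ):

```sh
# Illustrative only: map the CI-level POSTGRES flag onto the image's
# database selector, shared into the container via Complement's
# environment-prefix passthrough.
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
if [ -n "$POSTGRES" ]; then
  export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
else
  export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
```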
diff --git a/changelog.d/12737.doc b/changelog.d/12737.doc new file mode 100644 index 0000000000..ab2d1f2fd9 --- /dev/null +++ b/changelog.d/12737.doc @@ -0,0 +1 @@ +Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. \ No newline at end of file diff --git a/changelog.d/12738.misc b/changelog.d/12738.misc new file mode 100644 index 0000000000..8252223475 --- /dev/null +++ b/changelog.d/12738.misc @@ -0,0 +1 @@ +Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. \ No newline at end of file diff --git a/changelog.d/12857.feature b/changelog.d/12857.feature new file mode 100644 index 0000000000..ddd1dbe685 --- /dev/null +++ b/changelog.d/12857.feature @@ -0,0 +1 @@ +Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/changelog.d/12881.misc b/changelog.d/12881.misc new file mode 100644 index 0000000000..8a83182bd4 --- /dev/null +++ b/changelog.d/12881.misc @@ -0,0 +1 @@ +Merge the Complement testing Docker images into a single, multi-purpose image. \ No newline at end of file diff --git a/changelog.d/12929.misc b/changelog.d/12929.misc new file mode 100644 index 0000000000..20718d258d --- /dev/null +++ b/changelog.d/12929.misc @@ -0,0 +1 @@ +Clean up the test code for client disconnection. diff --git a/changelog.d/12954.misc b/changelog.d/12954.misc new file mode 100644 index 0000000000..20bf136732 --- /dev/null +++ b/changelog.d/12954.misc @@ -0,0 +1 @@ +Replace noop background updates with `DELETE` delta. diff --git a/changelog.d/12957.misc b/changelog.d/12957.misc new file mode 100644 index 0000000000..0c075276ec --- /dev/null +++ b/changelog.d/12957.misc @@ -0,0 +1 @@ +Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. diff --git a/changelog.d/12963.misc b/changelog.d/12963.misc new file mode 100644 index 0000000000..d57e1aca6b --- /dev/null +++ b/changelog.d/12963.misc @@ -0,0 +1 @@ +Reduce the amount of state we pull from the DB. diff --git a/changelog.d/12965.misc b/changelog.d/12965.misc new file mode 100644 index 0000000000..cc2823e12b --- /dev/null +++ b/changelog.d/12965.misc @@ -0,0 +1 @@ +Enable testing against PostgreSQL databases in Complement CI. \ No newline at end of file diff --git a/changelog.d/12969.misc b/changelog.d/12969.misc new file mode 100644 index 0000000000..05de7ce839 --- /dev/null +++ b/changelog.d/12969.misc @@ -0,0 +1 @@ +Fix an inaccurate comment. diff --git a/changelog.d/12970.misc b/changelog.d/12970.misc new file mode 100644 index 0000000000..8f874aa07b --- /dev/null +++ b/changelog.d/12970.misc @@ -0,0 +1 @@ +Remove the `delete_device` method and always call `delete_devices`. diff --git a/changelog.d/12973.bugfix b/changelog.d/12973.bugfix new file mode 100644 index 0000000000..1bf45854ff --- /dev/null +++ b/changelog.d/12973.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. 
diff --git a/changelog.d/12979.bugfix b/changelog.d/12979.bugfix new file mode 100644 index 0000000000..6b54408025 --- /dev/null +++ b/changelog.d/12979.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. diff --git a/changelog.d/12982.misc b/changelog.d/12982.misc new file mode 100644 index 0000000000..036b69efe6 --- /dev/null +++ b/changelog.d/12982.misc @@ -0,0 +1 @@ +Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. \ No newline at end of file diff --git a/changelog.d/12984.misc b/changelog.d/12984.misc new file mode 100644 index 0000000000..a902017180 --- /dev/null +++ b/changelog.d/12984.misc @@ -0,0 +1 @@ +Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc new file mode 100644 index 0000000000..d5ab9eedea --- /dev/null +++ b/changelog.d/12985.misc @@ -0,0 +1 @@ +Add type annotations to `tests.state.test_v2`. diff --git a/changelog.d/12990.misc b/changelog.d/12990.misc new file mode 100644 index 0000000000..c68f6a731e --- /dev/null +++ b/changelog.d/12990.misc @@ -0,0 +1 @@ +Fix documentation for running complement tests. diff --git a/changelog.d/12991.bugfix b/changelog.d/12991.bugfix new file mode 100644 index 0000000000..c6e388d5b9 --- /dev/null +++ b/changelog.d/12991.bugfix @@ -0,0 +1,2 @@ +Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced +in Synapse v1.41.0. diff --git a/changelog.d/13004.misc b/changelog.d/13004.misc new file mode 100644 index 0000000000..d8e93d87af --- /dev/null +++ b/changelog.d/13004.misc @@ -0,0 +1 @@ +Faster joins: add issue links to the TODO comments in the code. diff --git a/changelog.d/13013.misc b/changelog.d/13013.misc new file mode 100644 index 0000000000..903c6a3c8a --- /dev/null +++ b/changelog.d/13013.misc @@ -0,0 +1 @@ +Modernize the `contrib/graph/` scripts. diff --git a/changelog.d/13017.misc b/changelog.d/13017.misc new file mode 100644 index 0000000000..b314687f9c --- /dev/null +++ b/changelog.d/13017.misc @@ -0,0 +1 @@ +Remove redundant `room_version` parameters from event auth functions. diff --git a/changelog.d/13021.misc b/changelog.d/13021.misc new file mode 100644 index 0000000000..84c41cdf59 --- /dev/null +++ b/changelog.d/13021.misc @@ -0,0 +1 @@ +Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. diff --git a/changelog.d/13022.doc b/changelog.d/13022.doc new file mode 100644 index 0000000000..4d6ac7ae94 --- /dev/null +++ b/changelog.d/13022.doc @@ -0,0 +1 @@ +Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. diff --git a/changelog.d/13023.doc b/changelog.d/13023.doc new file mode 100644 index 0000000000..5589c7492c --- /dev/null +++ b/changelog.d/13023.doc @@ -0,0 +1 @@ +Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. \ No newline at end of file diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md new file mode 100644 index 0000000000..4dbfee2853 --- /dev/null +++ b/contrib/docker_compose_workers/README.md @@ -0,0 +1,125 @@ +# Setting up Synapse with Workers using Docker Compose + +This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
+ +Example worker configuration files can be found [here](workers). + +All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file. + +An example Docker Compose file can be found [here](docker-compose.yaml). + +## Worker Service Examples in Docker Compose + +In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` (or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`). + +### Generic Worker Example + +```yaml +synapse-generic-worker-1: + image: matrixdotorg/synapse:latest + container_name: synapse-generic-worker-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"] + healthcheck: + test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"] + start_period: "5s" + interval: "15s" + timeout: "5s" + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.generic_worker + # Expose port if required so your reverse proxy can send requests to this worker + # Port configuration will depend on how the http listener is defined in the worker configuration file + ports: + - 8081:8081 + depends_on: + - synapse +``` + +### Federation Sender Example + +Please note: The federation sender does not receive REST API calls, so no exposed ports are required. + +```yaml +synapse-federation-sender-1: + image: matrixdotorg/synapse:latest + container_name: synapse-federation-sender-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"] + healthcheck: + disable: true + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.federation_sender + depends_on: + - synapse +``` + +## `homeserver.yaml` Configuration + +### Enable Redis + +Locate the `redis` section of your `homeserver.yaml` and enable and configure it: + +```yaml +redis: + enabled: true + host: redis + port: 6379 + # password: <secret_password> +``` + +This assumes that your Redis service is called `redis` in your Docker Compose file.
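For reference, the matching service definition from the example [docker-compose.yaml](docker-compose.yaml) added later in this diff:

```yaml
redis:
  image: redis:latest
  restart: unless-stopped
  networks:
    - backend
```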
+ +### Add a replication Listener + +Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener: + +```yaml +listeners: + # Other listeners + + - port: 9093 + type: http + resources: + - names: [replication] +``` + +This listener is used by the workers for replication and is referred to in worker config files using the following settings: + +```yaml +worker_replication_host: synapse +worker_replication_http_port: 9093 +``` + +### Add Workers to `instance_map` + +Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers: + +```yaml +instance_map: + synapse-generic-worker-1: # The worker_name setting in your worker configuration file + host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file + port: 8034 # The port assigned to the replication listener in your worker config file + synapse-federation-sender-1: + host: synapse-federation-sender-1 + port: 8034 +``` + +### Configure Federation Senders + +This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them: + +```yaml +# This will disable federation sending on the main Synapse instance +send_federation: false + +federation_sender_instances: + - synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file +``` + +## Other Worker types + +Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers. \ No newline at end of file diff --git a/contrib/docker_compose_workers/docker-compose.yaml b/contrib/docker_compose_workers/docker-compose.yaml new file mode 100644 index 0000000000..eaf02c2af9 --- /dev/null +++ b/contrib/docker_compose_workers/docker-compose.yaml @@ -0,0 +1,77 @@ +networks: + backend: + +services: + postgres: + image: postgres:latest + restart: unless-stopped + volumes: + - ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw + networks: + - backend + environment: + POSTGRES_DB: synapse + POSTGRES_USER: synapse_user + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C + + redis: + image: redis:latest + restart: unless-stopped + networks: + - backend + + synapse: + image: matrixdotorg/synapse:latest + container_name: synapse + restart: unless-stopped + volumes: + - ${VOLUME_PATH}/data:/data:rw + ports: + - 8008:8008 + networks: + - backend + environment: + SYNAPSE_CONFIG_DIR: /data + SYNAPSE_CONFIG_PATH: /data/homeserver.yaml + depends_on: + - postgres + + synapse-generic-worker-1: + image: matrixdotorg/synapse:latest + container_name: synapse-generic-worker-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"] + healthcheck: + test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"] + start_period: "5s" + interval: "15s" + timeout: "5s" + networks: + - backend + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.generic_worker + # Expose port if required so your reverse proxy can send requests to this worker + # Port configuration will depend on how the http listener is defined in the 
worker configuration file + ports: + - 8081:8081 + depends_on: + - synapse + + synapse-federation-sender-1: + image: matrixdotorg/synapse:latest + container_name: synapse-federation-sender-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"] + healthcheck: + disable: true + networks: + - backend + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.federation_sender + depends_on: + - synapse diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml new file mode 100644 index 0000000000..5ba42a92d2 --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml @@ -0,0 +1,14 @@ +worker_app: synapse.app.federation_sender +worker_name: synapse-federation-sender-1 + +# The replication listener on the main synapse process. +worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + +worker_log_config: /data/federation_sender.log.config diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml new file mode 100644 index 0000000000..694584105a --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml @@ -0,0 +1,19 @@ +worker_app: synapse.app.generic_worker +worker_name: synapse-generic-worker-1 + +# The replication listener on the main synapse process. +worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + - type: http + port: 8081 + x_forwarded: true + resources: + - names: [client, federation] + +worker_log_config: /data/worker.log.config diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index fdbac087bd..3c4f47dbd2 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,11 +1,3 @@ -import argparse -import cgi -import datetime -import json - -import pydot -import urllib2 - # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +12,25 @@ import urllib2 # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import cgi +import datetime +import json +import urllib.request +from typing import List + +import pydot + -def make_name(pdu_id, origin): - return "%s@%s" % (pdu_id, origin) +def make_name(pdu_id: str, origin: str) -> str: + return f"{pdu_id}@{origin}" -def make_graph(pdus, room, filename_prefix): +def make_graph(pdus: List[dict], filename_prefix: str) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by querying a homeserver. 
+ """ pdu_map = {} node_map = {} @@ -111,10 +116,10 @@ def make_graph(pdus, room, filename_prefix): graph.write_svg("%s.svg" % filename_prefix, prog="dot") -def get_pdus(host, room): +def get_pdus(host: str, room: str) -> List[dict]: transaction = json.loads( - urllib2.urlopen( - "http://%s/_matrix/federation/v1/context/%s/" % (host, room) + urllib.request.urlopen( + f"http://{host}/_matrix/federation/v1/context/{room}/" ).read() ) @@ -141,4 +146,4 @@ if __name__ == "__main__": pdus = get_pdus(host, room) - make_graph(pdus, room, prefix) + make_graph(pdus, prefix) diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py index 0980231e4a..b46094ce0a 100644 --- a/contrib/graph/graph2.py +++ b/contrib/graph/graph2.py @@ -14,22 +14,31 @@ import argparse -import cgi import datetime +import html import json import sqlite3 import pydot -from synapse.events import FrozenEvent +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict from synapse.util.frozenutils import unfreeze -def make_graph(db_name, room_id, file_prefix, limit): +def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by reading from a Synapse SQLite database. + """ conn = sqlite3.connect(db_name) + sql = "SELECT room_version FROM rooms WHERE room_id = ?" + c = conn.execute(sql, (room_id,)) + room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]] + sql = ( - "SELECT json FROM event_json as j " + "SELECT json, internal_metadata FROM event_json as j " "INNER JOIN events as e ON e.event_id = j.event_id " "WHERE j.room_id = ?" ) @@ -43,7 +52,10 @@ def make_graph(db_name, room_id, file_prefix, limit): c = conn.execute(sql, args) - events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()] + events = [ + make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1])) + for e in c.fetchall() + ] events.sort(key=lambda e: e.depth) @@ -84,7 +96,7 @@ def make_graph(db_name, room_id, file_prefix, limit): "name": event.event_id, "type": event.type, "state_key": event.get("state_key", None), - "content": cgi.escape(content, quote=True), + "content": html.escape(content, quote=True), "time": t, "depth": event.depth, "state_group": state_group, @@ -96,11 +108,11 @@ def make_graph(db_name, room_id, file_prefix, limit): graph.add_node(node) for event in events: - for prev_id, _ in event.prev_events: + for prev_id in event.prev_event_ids(): try: end_node = node_map[prev_id] except Exception: - end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,)) + end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>") node_map[prev_id] = end_node graph.add_node(end_node) @@ -112,7 +124,7 @@ def make_graph(db_name, room_id, file_prefix, limit): if len(event_ids) <= 1: continue - cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),)) + cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>") for event_id in event_ids: cluster.add_node(node_map[event_id]) @@ -126,7 +138,7 @@ def make_graph(db_name, room_id, file_prefix, limit): if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate a PDU graph for a given room by talking " - "to the given homeserver to get the list of PDUs. \n" + "to the given Synapse SQLite file to get the list of PDUs. \n" "Requires pydot." 
) parser.add_argument( diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py index dd0c19368b..a28a1594c7 100644 --- a/contrib/graph/graph3.py +++ b/contrib/graph/graph3.py @@ -1,13 +1,3 @@ -import argparse -import cgi -import datetime - -import pydot -import simplejson as json - -from synapse.events import FrozenEvent -from synapse.util.frozenutils import unfreeze - # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,15 +12,35 @@ from synapse.util.frozenutils import unfreeze # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import datetime +import html +import json + +import pydot -def make_graph(file_name, room_id, file_prefix, limit): +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict +from synapse.util.frozenutils import unfreeze + + +def make_graph(file_name: str, file_prefix: str, limit: int) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by reading line-delimited JSON from a file. + """ print("Reading lines") with open(file_name) as f: lines = f.readlines() print("Read lines") - events = [FrozenEvent(json.loads(line)) for line in lines] + # Figure out the room version, assume the first line is the create event. + room_version = KNOWN_ROOM_VERSIONS[ + json.loads(lines[0]).get("content", {}).get("room_version") + ] + + events = [make_event_from_dict(json.loads(line), room_version) for line in lines] print("Loaded events.") @@ -66,8 +76,8 @@ def make_graph(file_name, room_id, file_prefix, limit): content.append( "<b>%s</b>: %s," % ( - cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), - cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), ) ) @@ -101,11 +111,11 @@ def make_graph(file_name, room_id, file_prefix, limit): print("Created Nodes") for event in events: - for prev_id, _ in event.prev_events: + for prev_id in event.prev_event_ids(): try: end_node = node_map[prev_id] except Exception: - end_node = pydot.Node(name=prev_id, label="<<b>%s</b>>" % (prev_id,)) + end_node = pydot.Node(name=prev_id, label=f"<<b>{prev_id}</b>>") node_map[prev_id] = end_node graph.add_node(end_node) @@ -139,8 +149,7 @@ if __name__ == "__main__": ) parser.add_argument("-l", "--limit", help="Only retrieve the last N events.") parser.add_argument("event_file") - parser.add_argument("room") args = parser.parse_args() - make_graph(args.event_file, args.room, args.prefix, args.limit) + make_graph(args.event_file, args.prefix, args.limit) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 24b03585f9..83db0a95b9 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,5 +1,6 @@ # Inherit from the official Synapse docker image -FROM matrixdotorg/synapse +ARG SYNAPSE_VERSION=latest +FROM matrixdotorg/synapse:$SYNAPSE_VERSION # Install deps RUN \ diff --git a/docker/README-testing.md b/docker/README-testing.md index c38cae7530..1f0423f09b 100644 --- a/docker/README-testing.md +++ b/docker/README-testing.md @@ -8,79 +8,50 @@ docker images that can be run inside Complement for testing purposes. Note that running Synapse's unit tests from within the docker image is not supported. 
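Before the detailed instructions below, typical invocations of that launch script look like this (the `POSTGRES` and `WORKERS` flags are documented in the contributing guide change elsewhere in this diff):

```sh
# Run from the root of a Synapse checkout, with Complement checked out alongside.
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh              # monolith + SQLite
POSTGRES=1 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh  # monolith + Postgres
WORKERS=1 COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh   # workers (implies Postgres)
```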
-## Testing with SQLite and single-process Synapse +## Using the Complement launch script -> Note that `scripts-dev/complement.sh` is a script that will automatically build -> and run an SQLite-based, single-process of Synapse against Complement. +`scripts-dev/complement.sh` is a script that will automatically build +and run Synapse against Complement. +Consult the [contributing guide][guideComplementSh] for instructions on how to use it. -The instructions below will set up Complement testing for a single-process, -SQLite-based Synapse deployment. -Start by building the base Synapse docker image. If you wish to run tests with the latest -release of Synapse, instead of your current checkout, you can skip this step. From the -root of the repository: - -```sh -docker build -t matrixdotorg/synapse -f docker/Dockerfile . -``` - -This will build an image with the tag `matrixdotorg/synapse`. - -Next, build the Synapse image for Complement. +[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement -```sh -docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement -``` +## Building and running the images manually -This will build an image with the tag `complement-synapse`, which can be handed to -Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to -[Complement's documentation](https://github.com/matrix-org/complement/#running) for -how to run the tests, as well as the various available command line flags. - -## Testing with PostgreSQL and single or multi-process Synapse +Under some circumstances, you may wish to build the images manually. +The instructions below will lead you to doing that. -The above docker image only supports running Synapse with SQLite and in a -single-process topology. The following instructions are used to build a Synapse image for -Complement that supports either single or multi-process topology with a PostgreSQL -database backend. - -As with the single-process image, build the base Synapse docker image. If you wish to run -tests with the latest release of Synapse, instead of your current checkout, you can skip -this step. From the root of the repository: +Start by building the base Synapse docker image. If you wish to run tests with the latest +release of Synapse, instead of your current checkout, you can skip this step. From the +root of the repository: ```sh docker build -t matrixdotorg/synapse -f docker/Dockerfile . ``` -This will build an image with the tag `matrixdotorg/synapse`. - -Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`. -Again, from the root of the repository: +Next, build the workerised Synapse docker image, which is a layer over the base +image. ```sh docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers . ``` -This will build an image with the tag` matrixdotorg/synapse-workers`. - -It's worth noting at this point that this image is fully functional, and -can be used for testing against locally. See instructions for using the container -under -[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone) -below. - -Finally, build the Synapse image for Complement, which is based on -`matrixdotorg/synapse-workers`. +Finally, build the multi-purpose image for Complement, which is a layer over the workers image. 
```sh -docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement +docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement ``` -This will build an image with the tag `complement-synapse-workers`, which can be handed to +This will build an image with the tag `complement-synapse`, which can be handed to Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to [Complement's documentation](https://github.com/matrix-org/complement/#running) for how to run the tests, as well as the various available command line flags. +See [the Complement image README](./complement/README.md) for information about the +expected environment variables. + + ## Running the Dockerfile-worker image standalone For manual testing of a multi-process Synapse instance in Docker, @@ -113,6 +84,9 @@ docker run -d --name synapse \ ...substituting `POSTGRES*` variables for those that match a postgres host you have available (usually a running postgres docker container). + +### Workers + The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to use when running the container. All possible worker names are defined by the keys of the `WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the @@ -125,8 +99,11 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES` (e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`). Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers -(leaving only the main process). The container is configured to use redis-based worker -mode. +(leaving only the main process). +The container will only be configured to use Redis-based worker mode if there are +workers enabled. + +### Logging Logs for workers and the main process are logged to stdout and can be viewed with standard `docker logs` tooling. Worker logs contain their worker name @@ -136,3 +113,21 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ `<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at 00: 00, according to the container's clock. Logging for the main process must still be configured by modifying the homeserver's log config in your Synapse data volume. + + +### Application Services + +Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of +a directory (within the container) will cause the configuration script to scan +that directory for `.yaml`/`.yml` registration files. +Synapse will be configured to load these configuration files. + + +### TLS Termination + +Nginx is present in the image to route requests to the appropriate workers, +but it does not serve TLS by default. + +You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a +TLS certificate and key (respectively), both in PEM (textual) format. +In this case, Nginx will additionally serve using HTTPS on port 8448. diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 4823ce7364..50684c956d 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -1,22 +1,45 @@ -# A dockerfile which builds an image suitable for testing Synapse under -# complement. - +# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse +# by including a built-in postgres instance, as well as setting up the homeserver so +# that it is ready for testing via Complement. 
+# +# Instructions for building this image from those it depends on is detailed in this guide: +# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse ARG SYNAPSE_VERSION=latest +FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION + +# Install postgresql +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 + +# Configure a user and create a database for Synapse +RUN pg_ctlcluster 13 main start && su postgres -c "echo \ + \"ALTER USER postgres PASSWORD 'somesecret'; \ + CREATE DATABASE synapse \ + ENCODING 'UTF8' \ + LC_COLLATE='C' \ + LC_CTYPE='C' \ + template=template0;\" | psql" && pg_ctlcluster 13 main stop + +# Extend the shared homeserver config to disable rate-limiting, +# set Complement's static shared secret, enable registration, amongst other +# tweaks to get Synapse ready for testing. +# To do this, we copy the old template out of the way and then include it +# with Jinja2. +RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 +COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 -FROM matrixdotorg/synapse:${SYNAPSE_VERSION} - -ENV SERVER_NAME=localhost - -COPY conf/* /conf/ +WORKDIR /data -# generate a signing key -RUN generate_signing_key -o /conf/server.signing.key +COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf -WORKDIR /data +# Copy the entrypoint +COPY conf/start_for_complement.sh / +# Expose nginx's listener ports EXPOSE 8008 8448 -ENTRYPOINT ["/conf/start.sh"] +ENTRYPOINT ["/start_for_complement.sh"] +# Update the healthcheck to have a shorter check interval HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD curl -fSs http://localhost:8008/health || exit 1 + CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/README.md b/docker/complement/README.md index e075418e4a..37c39e2dfc 100644 --- a/docker/complement/README.md +++ b/docker/complement/README.md @@ -1 +1,32 @@ -Stuff for building the docker image used for testing under complement. +# Unified Complement image for Synapse + +This is an image for testing Synapse with [the *Complement* integration test suite][complement]. +It contains some insecure defaults that are only suitable for testing purposes, +so **please don't use this image for a production server**. + +This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory +and can be switched using environment variables between the following configurations: + +- Monolithic Synapse with SQLite (`SYNAPSE_COMPLEMENT_DATABASE=sqlite`) +- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`) +- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`) + +The image is self-contained; it contains an integrated Postgres, Redis and Nginx. + + +## How to get Complement to pass the environment variables through + +To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv] +variable to configure an environment prefix to pass through, then prefix the above options +with that prefix. + +Example: +``` +COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres +``` + +Consult `scripts-dev/complement.sh` in the repository root for a real example. 
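As a sketch, a manual Complement run against this image might look like the following (the `go test` invocation follows Complement's own README; treat it as illustrative rather than exact):

```sh
# Run from a Complement checkout, using the image built from this Dockerfile.
export COMPLEMENT_BASE_IMAGE=complement-synapse
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
go test -v ./tests/...
```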
+ + +[complement]: https://github.com/matrix-org/complement +[complementEnv]: https://github.com/matrix-org/complement/pull/382 diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile deleted file mode 100644 index 99a09cbc2b..0000000000 --- a/docker/complement/SynapseWorkers.Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse -# by including a built-in postgres instance, as well as setting up the homeserver so -# that it is ready for testing via Complement. -# -# Instructions for building this image from those it depends on is detailed in this guide: -# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse -FROM matrixdotorg/synapse-workers - -# Install postgresql -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 - -# Configure a user and create a database for Synapse -RUN pg_ctlcluster 13 main start && su postgres -c "echo \ - \"ALTER USER postgres PASSWORD 'somesecret'; \ - CREATE DATABASE synapse \ - ENCODING 'UTF8' \ - LC_COLLATE='C' \ - LC_CTYPE='C' \ - template=template0;\" | psql" && pg_ctlcluster 13 main stop - -# Modify the shared homeserver config with postgres support, certificate setup -# and the disabling of rate-limiting -COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml - -WORKDIR /data - -COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf - -# Copy the entrypoint -COPY conf-workers/start-complement-synapse-workers.sh / - -# Expose nginx's listener ports -EXPOSE 8008 8448 - -ENTRYPOINT ["/start-complement-synapse-workers.sh"] - -# Update the healthcheck to have a shorter check interval -HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh deleted file mode 100755 index b7e2444000..0000000000 --- a/docker/complement/conf-workers/start-complement-synapse-workers.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement - -set -e - -function log { - d=$(date +"%Y-%m-%d %H:%M:%S,%3N") - echo "$d $@" -} - -# Set the server name of the homeserver -export SYNAPSE_SERVER_NAME=${SERVER_NAME} - -# No need to report stats here -export SYNAPSE_REPORT_STATS=no - -# Set postgres authentication details which will be placed in the homeserver config file -export POSTGRES_PASSWORD=somesecret -export POSTGRES_USER=postgres -export POSTGRES_HOST=localhost - -# Specify the workers to test with -export SYNAPSE_WORKER_TYPES="\ - event_persister, \ - event_persister, \ - background_worker, \ - frontend_proxy, \ - event_creator, \ - user_dir, \ - media_repository, \ - federation_inbound, \ - federation_reader, \ - federation_sender, \ - synchrotron, \ - appservice, \ - pusher" - -# Add Complement's appservice registration directory, if there is one -# (It can be absent when there are no application services in this test!) -if [ -d /complement/appservice ]; then - export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice -fi - -# Generate a TLS key, then generate a certificate by having Complement's CA sign it -# Note that both the key and certificate are in PEM format (not DER). 
-openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" - -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -export SYNAPSE_TLS_CERT=/conf/server.tls.crt -export SYNAPSE_TLS_KEY=/conf/server.tls.key - -# Run the script that writes the necessary config files and starts supervisord, which in turn -# starts everything else -exec /configure_workers_and_start.py diff --git a/docker/complement/conf/homeserver.yaml b/docker/complement/conf/homeserver.yaml deleted file mode 100644 index e2be540bbb..0000000000 --- a/docker/complement/conf/homeserver.yaml +++ /dev/null @@ -1,129 +0,0 @@ -## Server ## - -server_name: SERVER_NAME -log_config: /conf/log_config.yaml -report_stats: False -signing_key_path: /conf/server.signing.key -trusted_key_servers: [] -enable_registration: true -enable_registration_without_verification: true - -## Listeners ## - -tls_certificate_path: /conf/server.tls.crt -tls_private_key_path: /conf/server.tls.key -bcrypt_rounds: 4 -registration_shared_secret: complement - -listeners: - - port: 8448 - bind_addresses: ['::'] - type: http - tls: true - resources: - - names: [federation] - - - port: 8008 - bind_addresses: ['::'] - type: http - - resources: - - names: [client] - -## Database ## - -database: - name: "sqlite3" - args: - # We avoid /data, as it is a volume and is not transferred when the container is committed, - # which is a fundamental necessity in complement. - database: "/conf/homeserver.db" - -## Federation ## - -# trust certs signed by the complement CA -federation_custom_ca_list: -- /complement/ca/ca.crt - -# unblacklist RFC1918 addresses -ip_range_blacklist: [] - -# Disable server rate-limiting -rc_federation: - window_size: 1000 - sleep_limit: 10 - sleep_delay: 500 - reject_limit: 99999 - concurrent: 3 - -rc_message: - per_second: 9999 - burst_count: 9999 - -rc_registration: - per_second: 9999 - burst_count: 9999 - -rc_login: - address: - per_second: 9999 - burst_count: 9999 - account: - per_second: 9999 - burst_count: 9999 - failed_attempts: - per_second: 9999 - burst_count: 9999 - -rc_admin_redaction: - per_second: 9999 - burst_count: 9999 - -rc_joins: - local: - per_second: 9999 - burst_count: 9999 - remote: - per_second: 9999 - burst_count: 9999 - -rc_3pid_validation: - per_second: 1000 - burst_count: 1000 - -rc_invites: - per_room: - per_second: 1000 - burst_count: 1000 - per_user: - per_second: 1000 - burst_count: 1000 - -federation_rr_transactions_per_room_per_second: 9999 - -## API Configuration ## - -# A list of application service config files to use -# -app_service_config_files: -AS_REGISTRATION_FILES - -## Experimental Features ## - -experimental_features: - # Enable spaces support - spaces_enabled: true - # Enable history backfilling support - msc2716_enabled: true - # server-side support for partial state in /send_join responses - msc3706_enabled: true - # client-side support for partial state in /send_join responses - faster_joins: true - # Enable jump to date endpoint - msc3030_enabled: true - -server_notices: - system_mxid_localpart: _server - system_mxid_display_name: "Server Alert" - system_mxid_avatar_url: "" - room_name: "Server Alert" diff --git a/docker/complement/conf/log_config.yaml b/docker/complement/conf/log_config.yaml deleted file mode 100644 index c33fd6cd00..0000000000 --- a/docker/complement/conf/log_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ 
-version: 1 - -formatters: - precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' - -filters: - context: - (): synapse.logging.context.LoggingContextFilter - request: "" - -handlers: - console: - class: logging.StreamHandler - formatter: precise - filters: [context] - # log to stdout, for easier use with 'docker logs' - stream: 'ext://sys.stdout' - -root: - level: INFO - handlers: [console] - -disable_existing_loggers: false diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf index 5608342d1a..5dae3e6330 100644 --- a/docker/complement/conf-workers/postgres.supervisord.conf +++ b/docker/complement/conf/postgres.supervisord.conf @@ -1,6 +1,9 @@ [program:postgres] command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground +# Only start if START_POSTGRES=1 +autostart=%(ENV_START_POSTGRES)s + # Lower priority number = starts first priority=1 diff --git a/docker/complement/conf/start.sh b/docker/complement/conf/start.sh deleted file mode 100755 index 5d8d0fe016..0000000000 --- a/docker/complement/conf/start.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -set -e - -sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml - -# Add the application service registration files to the homeserver.yaml config -for filename in /complement/appservice/*.yaml; do - [ -f "$filename" ] || break - - as_id=$(basename "$filename" .yaml) - - # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after - # so we can add the next application service in the next iteration of this for loop - sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml -done -# Remove the AS_REGISTRATION_FILES entry -sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml - -# generate an ssl key and cert for the server, signed by the complement CA -openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@" - diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh new file mode 100755 index 0000000000..b9c97ab687 --- /dev/null +++ b/docker/complement/conf/start_for_complement.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# +# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement + +set -e + +echo "Complement Synapse launcher" +echo " Args: $@" +echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS" + +function log { + d=$(date +"%Y-%m-%d %H:%M:%S,%3N") + echo "$d $@" +} + +# Set the server name of the homeserver +export SYNAPSE_SERVER_NAME=${SERVER_NAME} + +# No need to report stats here +export SYNAPSE_REPORT_STATS=no + + +case "$SYNAPSE_COMPLEMENT_DATABASE" in + postgres) + # Set postgres authentication details which will be placed in the homeserver config file + export POSTGRES_PASSWORD=somesecret + export POSTGRES_USER=postgres + export POSTGRES_HOST=localhost + + # configure supervisord to start postgres + export START_POSTGRES=true + ;; + + sqlite) + # Configure supervisord not to start Postgres, as we don't need it + export 
START_POSTGRES=false + ;; + + *) + echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2 + exit 1 + ;; +esac + + +if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then + # Specify the workers to test with + export SYNAPSE_WORKER_TYPES="\ + event_persister, \ + event_persister, \ + background_worker, \ + frontend_proxy, \ + event_creator, \ + user_dir, \ + media_repository, \ + federation_inbound, \ + federation_reader, \ + federation_sender, \ + synchrotron, \ + appservice, \ + pusher" +else + # Empty string here means 'main process only' + export SYNAPSE_WORKER_TYPES="" +fi + + +# Add Complement's appservice registration directory, if there is one +# (It can be absent when there are no application services in this test!) +if [ -d /complement/appservice ]; then + export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice +fi + +# Generate a TLS key, then generate a certificate by having Complement's CA sign it +# Note that both the key and certificate are in PEM format (not DER). +openssl genrsa -out /conf/server.tls.key 2048 + +openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ + -subj "/CN=${SERVER_NAME}" + +openssl x509 -req -in /conf/server.tls.csr \ + -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ + -out /conf/server.tls.crt + +export SYNAPSE_TLS_CERT=/conf/server.tls.crt +export SYNAPSE_TLS_KEY=/conf/server.tls.key + +# Run the script that writes the necessary config files and starts supervisord, which in turn +# starts everything else +exec /configure_workers_and_start.py diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf/workers-shared-extra.yaml.j2 index cd7b50c65c..a5b1b6bb8b 100644 --- a/docker/complement/conf-workers/workers-shared.yaml +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -1,3 +1,11 @@ +{# + This file extends the default 'shared' configuration file (from the 'synapse-workers' + docker image) with Complement-specific tweak. + + The base configuration is moved out of the default path to `shared-orig.yaml.j2` + in the Complement Dockerfile and below we include that original file. +#} + ## Server ## report_stats: False trusted_key_servers: [] @@ -76,10 +84,16 @@ federation_rr_transactions_per_room_per_second: 9999 ## Experimental Features ## experimental_features: - # Enable history backfilling support - msc2716_enabled: true # Enable spaces support spaces_enabled: true + # Enable history backfilling support + msc2716_enabled: true + # server-side support for partial state in /send_join responses + msc3706_enabled: true + {% if not workers_in_use %} + # client-side support for partial state in /send_join responses + faster_joins: true + {% endif %} # Enable jump to date endpoint msc3030_enabled: true @@ -88,3 +102,5 @@ server_notices: system_mxid_display_name: "Server Alert" system_mxid_avatar_url: "" room_name: "Server Alert" + +{% include "shared-orig.yaml.j2" %} diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2 index 644ed788f3..92d25386dc 100644 --- a/docker/conf-workers/shared.yaml.j2 +++ b/docker/conf-workers/shared.yaml.j2 @@ -3,8 +3,10 @@ # configure_workers_and_start.py uses and amends to this file depending on the workers # that have been selected. 
+{% if enable_redis %} redis: enabled: true +{% endif %} {% if appservice_registrations is not none %} ## Application Services ## diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index ca1f7aef8e..7afab05133 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -28,6 +28,9 @@ stderr_logfile_maxbytes=0 username=redis autorestart=true +# Redis can be disabled if the image is being used without workers +autostart={{ enable_redis }} + [program:synapse_main] command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml priority=10 @@ -41,4 +44,4 @@ autorestart=unexpected exitcodes=0 # Additional process blocks -{{ worker_config }} \ No newline at end of file +{{ worker_config }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index f7dac90222..64697e0354 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -37,8 +37,8 @@ import sys from pathlib import Path from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set -import jinja2 import yaml +from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 @@ -236,12 +236,13 @@ def convert(src: str, dst: str, **template_vars: object) -> None: template_vars: The arguments to replace placeholder variables in the template with. """ # Read the template file - with open(src) as infile: - template = infile.read() + # We disable autoescape to prevent template variables from being escaped, + # as we're not using HTML. + env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False) + template = env.get_template(os.path.basename(src)) - # Generate a string from the template. We disable autoescape to prevent template - # variables from being escaped. - rendered = jinja2.Template(template, autoescape=False).render(**template_vars) + # Generate a string from the template. 
+ rendered = template.render(**template_vars) # Write the generated contents to a file # @@ -378,8 +379,8 @@ def generate_worker_files( nginx_locations = {} # Read the desired worker configuration from the environment - worker_types_env = environ.get("SYNAPSE_WORKER_TYPES") - if worker_types_env is None: + worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() + if not worker_types_env: # No workers, just the main process worker_types = [] else: @@ -506,12 +507,16 @@ def generate_worker_files( if reg_path.suffix.lower() in (".yaml", ".yml") ] + workers_in_use = len(worker_types) > 0 + # Shared homeserver config convert( "/conf/shared.yaml.j2", "/conf/workers/shared.yaml", shared_worker_config=yaml.dump(shared_config), appservice_registrations=appservice_registrations, + enable_redis=workers_in_use, + workers_in_use=workers_in_use, ) # Nginx config @@ -531,6 +536,7 @@ def generate_worker_files( "/etc/supervisor/supervisord.conf", main_config_path=config_path, worker_config=supervisord_config, + enable_redis=workers_in_use, ) # healthcheck config diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8400a6539a..d7cf2df112 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -88,6 +88,7 @@ - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) + - [Dependency management](development/dependencies.md) - [Synapse Architecture]() - [Cancellation](development/synapse_architecture/cancellation.md) - [Log Contexts](log_contexts.md) diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 2b3714df66..c2f04a3905 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -304,6 +304,11 @@ To run a specific test, you can specify the whole name structure: COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order ``` +The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try: + +- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead. +- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres. + ### Access database for homeserver after Complement test runs. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index ad35e667ed..8ca7d5bdbf 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -38,15 +38,13 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool +async def user_may_join_room(user: str, room: str, is_invited: bool) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when a user is trying to join a room. The module must return a `bool` to indicate -whether the user can join the room. Return `False` to prevent the user from joining the -room; otherwise return `True` to permit the joining. - -The user is represented by their Matrix user ID (e.g. +Called when a user is trying to join a room. The user is represented by their Matrix user ID (e.g. 
`@alice:example.com`) and the room is represented by its Matrix ID (e.g. `!room:example.com`). The module is also given a boolean to indicate whether the user currently has a pending invite in the room. @@ -54,46 +52,67 @@ currently has a pending invite in the room. This callback isn't called if the join is performed by a server administrator, or in the context of a room creation. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. ### `user_may_invite` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool +async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing an invitation. The module must return a `bool` indicating whether -the inviter can invite the invitee to the given room. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). Return `False` to prevent -the invitation; otherwise return `True` to permit it. +Called when processing an invitation. Both inviter and invitee are +represented by their Matrix user ID (e.g. `@alice:example.com`). + + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. 
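For illustration, a hypothetical module implementing `user_may_join_room` under the new convention (the invite-only policy is invented for this example; only the return values follow the documented signature):

```python
from typing import Union

import synapse.module_api
from synapse.module_api.errors import Codes


class ExampleSpamChecker:
    async def user_may_join_room(
        self, user: str, room: str, is_invited: bool
    ) -> Union["synapse.module_api.NOT_SPAM", Codes]:
        # Hypothetical policy: only users holding an invite may join.
        if is_invited:
            return synapse.module_api.NOT_SPAM
        return Codes.FORBIDDEN
```

Returning a `Codes` value rather than the deprecated `True` lets the module choose which error the client sees.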
+ ### `user_may_send_3pid_invite` _First introduced in Synapse v1.45.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python async def user_may_send_3pid_invite( inviter: str, medium: str, address: str, room_id: str, -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` Called when processing an invitation using a third-party identifier (also called a 3PID, -e.g. an email address or a phone number). The module must return a `bool` indicating -whether the inviter can invite the invitee to the given room. Return `False` to prevent -the invitation; otherwise return `True` to permit it. +e.g. an email address or a phone number). The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. "email") and its address @@ -115,63 +134,108 @@ await user_may_send_3pid_invite( **Note**: If the third-party identifier is already associated with a matrix user ID, [`user_may_invite`](#user_may_invite) will be used instead. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `user_may_create_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room(user: str) -> bool +async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing a room creation request. The module must return a `bool` indicating -whether the given user (represented by their Matrix user ID) is allowed to create a room. -Return `False` to prevent room creation; otherwise return `True` to permit it. +Called when processing a room creation request. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. 
If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_create_room_alias` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool +async def user_may_create_room_alias(user_id: str, room_alias: "synapse.module_api.RoomAlias") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to associate an alias with an existing room. The module must return a -`bool` indicating whether the given user (represented by their Matrix user ID) is allowed -to set the given alias. Return `False` to prevent the alias creation; otherwise return -`True` to permit it. +Called when trying to associate an alias with an existing room. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_publish_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_publish_room(user: str, room_id: str) -> bool +async def user_may_publish_room(user_id: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to publish a room to the homeserver's public rooms directory. The -module must return a `bool` indicating whether the given user (represented by their -Matrix user ID) is allowed to publish the given room. Return `False` to prevent the -room from being published; otherwise return `True` to permit its publication. +Called when trying to publish a room to the homeserver's public rooms directory. 
+ +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `check_username_for_spam` @@ -239,21 +303,32 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python async def check_media_file_for_spam( file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper", file_info: "synapse.rest.media.v1._base.FileInfo", -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when storing a local or remote file. The module must return a `bool` indicating -whether the given file should be excluded from the homeserver's media store. Return -`True` to prevent this file from being stored; otherwise return `False`. +Called when storing a local or remote file. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `False`, Synapse falls through to the next one. The value of the first -callback that does not return `False` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. 
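As a sketch of the signature above, a hypothetical media checker (the byte comparison is an invented policy; `write_chunks_to` is the read helper shown in this module documentation):

```python
from io import BytesIO
from typing import Literal, Union

import synapse.module_api
from synapse.module_api.errors import Codes


class ExampleMediaChecker:
    async def check_media_file_for_spam(
        self,
        file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
        file_info: "synapse.rest.media.v1._base.FileInfo",
    ) -> Union[Literal["NOT_SPAM"], Codes]:
        # Buffer the file's contents, then reject a known-bad payload.
        buffer = BytesIO()
        await file_wrapper.write_chunks_to(buffer.write)
        if buffer.getvalue() == b"Hello World":
            return Codes.FORBIDDEN
        return synapse.module_api.NOT_SPAM
```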
+ ### `should_drop_federated_event` @@ -316,6 +391,9 @@ class ListSpamChecker: resource=IsUserEvilResource(config), ) - async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[bool, str]: - return event.sender not in self.evil_users + async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[Literal["NOT_SPAM"], Codes]: + if event.sender in self.evil_users: + return Codes.FORBIDDEN + else: + return synapse.module_api.NOT_SPAM ``` diff --git a/docs/upgrade.md b/docs/upgrade.md index 5ac29abb08..312f0b87fe 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,47 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.62.0 + +## New signatures for spam checker callbacks + +As a follow-up to changes in v1.60.0, the following spam-checker callbacks have changed signature: + +- `user_may_join_room` +- `user_may_invite` +- `user_may_send_3pid_invite` +- `user_may_create_room` +- `user_may_create_room_alias` +- `user_may_publish_room` +- `check_media_file_for_spam` + +For each of these methods, the previous callback signature has been deprecated. + +Whereas callbacks used to return `bool`, they should now return `Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]`. + +For instance, if your module implements `user_may_join_room` as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool): + if ...: + # Request is spam + return False + # Request is not spam + return True +``` + +you should rewrite it as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool): + if ...: + # Request is spam, mark it as forbidden (you may use some more precise error + # code if it is useful). + return synapse.module_api.errors.Codes.FORBIDDEN + # Request is not spam, mark it as such.
+ return synapse.module_api.NOT_SPAM +``` + # Upgrading to v1.61.0 ## Removal of deprecated community/groups diff --git a/mypy.ini b/mypy.ini index fe3e3f9b8e..7973f2ac01 100644 --- a/mypy.ini +++ b/mypy.ini @@ -56,7 +56,6 @@ exclude = (?x) |tests/rest/media/v1/test_media_storage.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py - |tests/state/test_v2.py |tests/test_metrics.py |tests/test_server.py |tests/test_state.py @@ -115,6 +114,9 @@ disallow_untyped_defs = False [mypy-tests.handlers.test_user_directory] disallow_untyped_defs = True +[mypy-tests.state.test_profile] +disallow_untyped_defs = True + [mypy-tests.storage.test_profile] disallow_untyped_defs = True diff --git a/poetry.lock b/poetry.lock index 7c561e3182..8a54a939fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -524,7 +524,7 @@ python-versions = ">=3.7" [[package]] name = "matrix-common" -version = "1.1.0" +version = "1.2.1" description = "Common utilities for Synapse, Sydent and Sygnal" category = "main" optional = false @@ -535,7 +535,7 @@ attrs = "*" importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] -dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==21.9b0)", "flake8 (==4.0.1)", "isort (==5.9.3)"] +dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"] test = ["tox", "twisted", "aiounittest"] [[package]] @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "539e5326f401472d1ffc8325d53d72e544cd70156b3f43f32f1285c4c131f831" +content-hash = "c1bb4dabba1e87517e25ca7bf778e8082fbc960a51d83819aec3a154110a374f" [metadata.files] attrs = [ @@ -2042,8 +2042,8 @@ markupsafe = [ {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, ] matrix-common = [ - {file = "matrix_common-1.1.0-py3-none-any.whl", hash = "sha256:5d6dfd777503b2f3a031b566e6af25b6e95f9c0818ef57d954c3190fce5eb407"}, - {file = "matrix_common-1.1.0.tar.gz", hash = "sha256:a8238748afc2b37079818367fed5156f355771b07c8ff0a175934f47e0ff3276"}, + {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"}, + {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"}, ] matrix-synapse-ldap3 = [ {file = "matrix-synapse-ldap3-0.2.0.tar.gz", hash = "sha256:91a0715b43a41ec3033244174fca20846836da98fda711fb01687f7199eecd2e"}, diff --git a/pyproject.toml b/pyproject.toml index 8b21bdc837..3c64e248ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1" cryptography = ">=3.4.7" # ijson 3.1.4 fixes a bug with "." in property names ijson = ">=3.1.4" -matrix-common = "~=1.1.0" +matrix-common = "~=1.2.1" # We need packaging.requirements.Requirement, added in 16.1. packaging = ">=16.1" # At the time of writing, we only use functions from the version `importlib.metadata` diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3c472c576e..30b974b954 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -18,7 +18,7 @@ # argument to the script. Complement will then only run those tests. If # no regex is supplied, all tests are run. 
For example; # -# ./complement.sh "TestOutboundFederation(Profile|Send)" +# ./complement.sh -run "TestOutboundFederation(Profile|Send)" # # Exit if a line returns a non-zero exit code @@ -43,17 +43,29 @@ fi # Build the base Synapse image from the local checkout docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . +# Build the workers docker image (from the base Synapse image we just built). +docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + +# Build the unified Complement image (from the worker Synapse image we just built). +docker build -t complement-synapse \ + -f "docker/complement/Dockerfile" "docker/complement" + +export COMPLEMENT_BASE_IMAGE=complement-synapse + extra_test_args=() test_tags="synapse_blacklist,msc2716,msc3030,msc3787" -# If we're using workers, modify the docker files slightly. +# All environment variables starting with PASS_ will be shared. +# (The prefix is stripped off before reaching the container.) +export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ + if [[ -n "$WORKERS" ]]; then - # Build the workers docker image (from the base Synapse image). - docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + # Use workers. + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true - export COMPLEMENT_BASE_IMAGE=complement-synapse-workers - COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile + # Workers can only use Postgres as a database. + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres # And provide some more configuration to complement. @@ -65,17 +77,18 @@ if [[ -n "$WORKERS" ]]; then # ... and it takes longer than 10m to run the whole suite. extra_test_args+=("-timeout=60m") else - export COMPLEMENT_BASE_IMAGE=complement-synapse - COMPLEMENT_DOCKERFILE=Dockerfile + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= + if [[ -n "$POSTGRES" ]]; then + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + else + export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + fi # We only test faster room joins on monoliths, because they are purposefully # being developed without worker support to start with. test_tags="$test_tags,faster_joins" fi -# Build the Complement image from the Synapse image we just built. -docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement" - # Run the tests! echo "Images built; running complement" cd "$COMPLEMENT_DIR" diff --git a/synapse/__init__.py b/synapse/__init__.py index 1613941759..b1369aca8f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -20,8 +20,6 @@ import json import os import sys -from matrix_common.versionstring import get_distribution_version_string - # Check that we're not running on an unsupported Python version. 
if sys.version_info < (3, 7): print("Synapse requires Python 3.7 or above.") @@ -70,7 +68,9 @@ try: except ImportError: pass -__version__ = get_distribution_version_string("matrix-synapse") +import synapse.util + +__version__ = synapse.util.SYNAPSE_VERSION if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 361b51d2fa..9586086c03 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -40,7 +40,6 @@ from typing import ( ) import yaml -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import TypedDict from twisted.internet import defer, reactor as reactor_ @@ -62,7 +61,6 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, ) -from synapse.storage.databases.main.group_server import GroupServerStore from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) @@ -84,7 +82,7 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.types import ISynapseReactor -from synapse.util import Clock +from synapse.util import SYNAPSE_VERSION, Clock # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. @@ -219,7 +217,6 @@ class Store( PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, - GroupServerStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) @@ -258,9 +255,7 @@ class MockHomeserver: self.clock = Clock(reactor) self.config = config self.hostname = config.server.server_name - self.version_string = "Synapse/" + get_distribution_version_string( - "matrix-synapse" - ) + self.version_string = SYNAPSE_VERSION def get_clock(self) -> Clock: return self.clock diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index c443522c05..b4aeae6dd5 100755 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -19,7 +19,6 @@ import sys from typing import cast import yaml -from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, reactor as reactor_ @@ -28,6 +27,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore from synapse.types import ISynapseReactor +from synapse.util import SYNAPSE_VERSION # Cast safety: Twisted does some naughty magic which replaces the # twisted.internet.reactor module with a Reactor instance at runtime. 
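The import swaps above all converge on one constant in `synapse.util`. A sketch of its likely shape, assuming it is still derived from the installed package metadata via `matrix_common` (which the removed imports suggest):

```python
# synapse/util/__init__.py (sketch, not taken from the patch)
from matrix_common.versionstring import get_distribution_version_string

# Computed once at import time; call sites no longer re-derive it.
SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse")
```

Call sites then build their server strings as `f"Synapse/{SYNAPSE_VERSION}"`, as in the hunks above and below.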
@@ -43,8 +43,7 @@ class MockHomeserver(HomeServer): hostname=config.server.server_name, config=config, reactor=reactor, - version_string="Synapse/" - + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{SYNAPSE_VERSION}", ) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5a410f805a..c037ccb984 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -20,7 +20,6 @@ from netaddr import IPAddress from twisted.web.server import Request from synapse import event_auth -from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import ( AuthError, @@ -67,8 +66,6 @@ class Auth: 10000, "token_cache" ) - self._auth_blocking = AuthBlocking(self.hs) - self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips self._macaroon_secret_key = hs.config.key.macaroon_secret_key @@ -711,14 +708,3 @@ class Auth: "User %s not in room %s, and room previews are disabled" % (user_id, room_id), ) - - async def check_auth_blocking( - self, - user_id: Optional[str] = None, - threepid: Optional[dict] = None, - user_type: Optional[str] = None, - requester: Optional[Requester] = None, - ) -> None: - await self._auth_blocking.check_auth_blocking( - user_id=user_id, threepid=threepid, user_type=user_type, requester=requester - ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a3446ac6e8..84e389a6cd 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -37,7 +37,6 @@ from typing import ( ) from cryptography.utils import CryptographyDeprecationWarning -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import ParamSpec import twisted @@ -68,6 +67,7 @@ from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.types import ISynapseReactor +from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries from synapse.util.daemonize import daemonize_process from synapse.util.gai_resolver import GAIResolver @@ -540,7 +540,7 @@ def setup_sentry(hs: "HomeServer") -> None: sentry_sdk.init( dsn=hs.config.metrics.sentry_dsn, - release=get_distribution_version_string("matrix-synapse"), + release=SYNAPSE_VERSION, ) # We set some default tags that give some context to this instance diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 6fedf681f8..561621a285 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -19,8 +19,6 @@ import sys import tempfile from typing import List, Optional -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet import defer, task import synapse @@ -43,6 +41,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.server import HomeServer from synapse.storage.databases.main.room import RoomWorkerStore from synapse.types import StateMap +from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") @@ -220,7 +219,7 @@ def start(config_options: List[str]) -> None: ss = AdminCmdServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + 
version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 89f8998f0e..4a987fb759 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -16,8 +16,6 @@ import logging import sys from typing import Dict, List, Optional, Tuple -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet import address from twisted.web.resource import Resource @@ -121,6 +119,7 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import JsonDict +from synapse.util import SYNAPSE_VERSION from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") @@ -447,7 +446,7 @@ def start(config_options: List[str]) -> None: hs = GenericWorkerServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{SYNAPSE_VERSION}", ) setup_logging(hs, config, use_worker_options=True) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 4c6c0658ab..745e704141 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -18,8 +18,6 @@ import os import sys from typing import Dict, Iterable, List -from matrix_common.versionstring import get_distribution_version_string - from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource from twisted.web.server import GzipEncoderFactory @@ -69,7 +67,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.util.check_dependencies import check_requirements +from synapse.util.check_dependencies import VERSION, check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module @@ -371,7 +369,7 @@ def setup(config_options: List[str]) -> SynapseHomeServer: hs = SynapseHomeServer( config.server.server_name, config=config, - version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), + version_string=f"Synapse/{VERSION}", ) synapse.config.logger.setup_logging(hs, config, use_worker_options=False) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index f2dfd49b07..0a285dba31 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,3 +84,6 @@ class ExperimentalConfig(Config): # MSC3772: A push rule for mutual relations. self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False) + + # MSC3715: dir param on /relations. 
+ self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 470b8b4492..82a5b5fa12 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -22,7 +22,6 @@ from string import Template from typing import TYPE_CHECKING, Any, Dict, Optional import yaml -from matrix_common.versionstring import get_distribution_version_string from zope.interface import implementer from twisted.logger import ( @@ -37,6 +36,7 @@ from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter from synapse.types import JsonDict +from ..util import SYNAPSE_VERSION from ._base import Config, ConfigError if TYPE_CHECKING: @@ -349,7 +349,7 @@ def setup_logging( logging.warning( "Server %s version %s", sys.argv[0], - get_distribution_version_string("matrix-synapse"), + SYNAPSE_VERSION, ) logging.info("Server hostname: %s", config.server.server_name) logging.info("Instance name: %s", hs.get_instance_name()) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4c0b587a76..e23503c1e0 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -45,9 +45,7 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) -def validate_event_for_room_version( - room_version_obj: RoomVersion, event: "EventBase" -) -> None: +def validate_event_for_room_version(event: "EventBase") -> None: """Ensure that the event complies with the limits, and has the right signatures NB: does not *validate* the signatures - it assumes that any signatures present @@ -60,12 +58,10 @@ def validate_event_for_room_version( NB: This is used to check events that have been received over federation. As such, it can only enforce the checks specified in the relevant room version, to avoid a split-brain situation where some servers accept such events, and others reject - them. - - TODO: consider moving this into EventValidator + them. See also EventValidator, which contains extra checks which are applied only to + locally-generated events. Args: - room_version_obj: the version of the room which contains this event event: the event to be checked Raises: @@ -103,7 +99,7 @@ def validate_event_for_room_version( raise AuthError(403, "Event not signed by sending server") is_invite_via_allow_rule = ( - room_version_obj.msc3083_join_rules + event.room_version.msc3083_join_rules and event.type == EventTypes.Member and event.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in event.content @@ -117,7 +113,6 @@ def validate_event_for_room_version( def check_auth_rules_for_event( - room_version_obj: RoomVersion, event: "EventBase", auth_events: Iterable["EventBase"], ) -> None: @@ -136,7 +131,6 @@ def check_auth_rules_for_event( a bunch of other tests. Args: - room_version_obj: the version of the room event: the event being checked. auth_events: the room state to check the events against. @@ -205,7 +199,10 @@ def check_auth_rules_for_event( raise AuthError(403, "This room has been marked as unfederatable.") # 4. If type is m.room.aliases - if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth: + if ( + event.type == EventTypes.Aliases + and event.room_version.special_case_aliases_auth + ): # 4a. If event has no state_key, reject if not event.is_state(): raise AuthError(403, "Alias event must be a state event") @@ -225,7 +222,7 @@ def check_auth_rules_for_event( # 5. 
If type is m.room.membership if event.type == EventTypes.Member: - _is_membership_change_allowed(room_version_obj, event, auth_dict) + _is_membership_change_allowed(event.room_version, event, auth_dict) logger.debug("Allowing! %s", event) return @@ -247,17 +244,17 @@ def check_auth_rules_for_event( _can_send_event(event, auth_dict) if event.type == EventTypes.PowerLevels: - _check_power_levels(room_version_obj, event, auth_dict) + _check_power_levels(event.room_version, event, auth_dict) if event.type == EventTypes.Redaction: - check_redaction(room_version_obj, event, auth_dict) + check_redaction(event.room_version, event, auth_dict) if ( event.type == EventTypes.MSC2716_INSERTION or event.type == EventTypes.MSC2716_BATCH or event.type == EventTypes.MSC2716_MARKER ): - check_historical(room_version_obj, event, auth_dict) + check_historical(event.room_version, event, auth_dict) logger.debug("Allowing! %s", event) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index d2e06c754e..32712d2042 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -28,7 +28,10 @@ from typing import ( Union, ) -from synapse.api.errors import Codes +# `Literal` appears with Python 3.8. +from typing_extensions import Literal + +import synapse from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour @@ -47,12 +50,12 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ Awaitable[ Union[ str, - Codes, + "synapse.api.errors.Codes", # Highly experimental, not officially part of the spamchecker API, may # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. 
- Tuple[Codes, Dict], + Tuple["synapse.api.errors.Codes", Dict], # Deprecated bool, ] @@ -62,12 +65,72 @@ SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[ ["synapse.events.EventBase"], Awaitable[Union[bool, str]], ] -USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] -USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]] -USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] -USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +USER_MAY_JOIN_ROOM_CALLBACK = Callable[ + [str, str, bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_INVITE_CALLBACK = Callable[ + [str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ + [str, str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_CALLBACK = Callable[ + [str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ + [str, RoomAlias], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ + [str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ @@ -88,7 +151,14 @@ CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ ] CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[ [ReadableFileWrapper, FileInfo], - Awaitable[bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], ] @@ -181,7 +251,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: class SpamChecker: - NOT_SPAM = "NOT_SPAM" + NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM" def __init__(self, hs: "synapse.server.HomeServer") -> None: self.hs = hs @@ -275,7 +345,7 @@ class SpamChecker: async def check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[Tuple[Codes, Dict], str]: + ) -> Union[Tuple["synapse.api.errors.Codes", Dict], str]: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -306,7 +376,7 @@ class SpamChecker: elif res is True: # This spam-checker rejects the event with deprecated # return value `True` - return Codes.FORBIDDEN + return (synapse.api.errors.Codes.FORBIDDEN, {}) elif not isinstance(res, str): # mypy complains that we can't reach this code because of the # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know @@ -352,7 +422,7 @@ class SpamChecker: async def user_may_join_room( self, user_id: str, room_id: str, is_invited: bool - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given users is allowed to join a room. Not called when a user creates a room. 
@@ -362,54 +432,70 @@ class SpamChecker: is_invited: Whether the user is invited into the room Returns: - Whether the user may join the room + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_join_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_join_room = await delay_cancellation( - callback(user_id, room_id, is_invited) - ) - if may_join_room is False: - return False + res = await delay_cancellation(callback(user_id, room_id, is_invited)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting join as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. + return self.NOT_SPAM async def user_may_invite( self, inviter_userid: str, invitee_userid: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may send an invite - If this method returns false, the invite will be rejected. - Args: inviter_userid: The user ID of the sender of the invitation invitee_userid: The user ID targeted in the invitation room_id: The room ID Returns: - True if the user may send an invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, invitee_userid, room_id) ) - if may_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. + return self.NOT_SPAM async def user_may_send_3pid_invite( self, inviter_userid: str, medium: str, address: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may invite a given threepid into the room - If this method returns false, the threepid invite will be rejected. - Note that if the threepid is already associated with a Matrix user ID, Synapse will call user_may_invite with said user ID instead. @@ -420,88 +506,113 @@ class SpamChecker: room_id: The room ID Returns: - True if the user may send the invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_send_3pid_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_send_3pid_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, medium, address, room_id) ) - if may_send_3pid_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. 
+ if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting 3pid invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_create_room(self, userid: str) -> bool: + async def user_may_create_room( + self, userid: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room - If this method returns false, the creation request will be rejected. - Args: userid: The ID of the user attempting to create a room - - Returns: - True if the user may create a room, otherwise False """ for callback in self._user_may_create_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room = await delay_cancellation(callback(userid)) - if may_create_room is False: - return False + res = await delay_cancellation(callback(userid)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room creation as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room alias - If this method returns false, the association request will be rejected. - Args: userid: The ID of the user attempting to create a room alias room_alias: The alias to be created - Returns: - True if the user may create a room alias, otherwise False """ for callback in self._user_may_create_room_alias_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room_alias = await delay_cancellation( - callback(userid, room_alias) - ) - if may_create_room_alias is False: - return False + res = await delay_cancellation(callback(userid, room_alias)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room create as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_publish_room(self, userid: str, room_id: str) -> bool: + async def user_may_publish_room( + self, userid: str, room_id: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may publish a room to the directory - If this method returns false, the publish request will be rejected. 
- Args: userid: The user ID attempting to publish the room room_id: The ID of the room that would be published - - Returns: - True if the user may publish the room, otherwise False """ for callback in self._user_may_publish_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_publish_room = await delay_cancellation(callback(userid, room_id)) - if may_publish_room is False: - return False + res = await delay_cancellation(callback(userid, room_id)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room publication as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def check_username_for_spam(self, user_profile: UserProfile) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. @@ -567,7 +678,7 @@ class SpamChecker: async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a piece of newly uploaded media should be blocked. This will be called for local uploads, downloads of remote media, each @@ -580,31 +691,37 @@ class SpamChecker: async def check_media_file_for_spam( self, file: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union[Codes, Literal["NOT_SPAM"]]: buffer = BytesIO() await file.write_chunks_to(buffer.write) if buffer.getvalue() == b"Hello World": - return True + return Codes.FORBIDDEN - return False + return synapse.module_api.NOT_SPAM Args: file: An object that allows reading the contents of the media. file_info: Metadata about the file. - - Returns: - True if the media should be blocked or False if it should be - allowed. """ for callback in self._check_media_file_for_spam_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - spam = await delay_cancellation(callback(file_wrapper, file_info)) - if spam: - return True + res = await delay_cancellation(callback(file_wrapper, file_info)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is False or res is self.NOT_SPAM: + continue + elif res is True: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting media file as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return False + return self.NOT_SPAM diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 29fa9b3880..27c8beba25 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -35,6 +35,10 @@ class EventValidator: def validate_new(self, event: EventBase, config: HomeServerConfig) -> None: """Validates the event has roughly the right format + Suitable for checking a locally-created event. It has stricter checks than + is appropriate for an event received over federation (for which, see + event_auth.validate_event_for_room_version) + Args: event: The event to validate. config: The homeserver's configuration.
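The `SpamChecker` methods rewritten above all normalise callback return values the same way. Distilled into one standalone sketch (the helper name and stripped-down loop are illustrative; the real methods inline this logic, wrap each call in `Measure` and `delay_cancellation`, and `check_media_file_for_spam` inverts the deprecated boolean polarity):

```python
from typing import Awaitable, Callable, Literal, Sequence, Union

from synapse.api.errors import Codes


async def run_spam_callbacks(
    callbacks: Sequence[Callable[..., Awaitable[Union[Literal["NOT_SPAM"], Codes, bool]]]],
    *args: object,
) -> Union[Literal["NOT_SPAM"], Codes]:
    for callback in callbacks:
        res = await callback(*args)
        if res is True or res == "NOT_SPAM":
            # This callback permits the operation; consult the next one.
            continue
        elif res is False:
            # Deprecated boolean rejection.
            return Codes.FORBIDDEN
        elif isinstance(res, Codes):
            # Rejection with a module-chosen error code.
            return res
        else:
            # Invalid return value: fail closed.
            return Codes.FORBIDDEN
    # No callback objected.
    return "NOT_SPAM"
```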
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index ad475a913b..66e6305562 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1642,10 +1642,6 @@ def _validate_hierarchy_event(d: JsonDict) -> None: if not isinstance(event_type, str): raise ValueError("Invalid event: 'event_type' must be a str") - room_id = d.get("room_id") - if not isinstance(room_id, str): - raise ValueError("Invalid event: 'room_id' must be a str") - state_key = d.get("state_key") if not isinstance(state_key, str): raise ValueError("Invalid event: 'state_key' must be a str") diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 333ca9a97f..41d8b937af 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -37,6 +37,7 @@ from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ReadReceipt from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter +from synapse.visibility import filter_events_for_server if TYPE_CHECKING: import synapse.server @@ -77,6 +78,7 @@ class PerDestinationQueue: ): self._server_name = hs.hostname self._clock = hs.get_clock() + self._storage_controllers = hs.get_storage_controllers() self._store = hs.get_datastores().main self._transaction_manager = transaction_manager self._instance_name = hs.get_instance_name() @@ -442,6 +444,12 @@ class PerDestinationQueue: "This should not happen." % event_ids ) + logger.info( + "Catching up destination %s with %d PDUs", + self._destination, + len(catchup_pdus), + ) + # We send transactions with events from one room only, as its likely # that the remote will have to do additional processing, which may # take some time. It's better to give it small amounts of work @@ -487,19 +495,20 @@ class PerDestinationQueue: ): continue - # Filter out events where the server is not in the room, - # e.g. it may have left/been kicked. *Ideally* we'd pull - # out the kick and send that, but it's a rare edge case - # so we don't bother for now (the server that sent the - # kick should send it out if its online). - hosts = await self._state.get_hosts_in_room_at_events( - p.room_id, [p.event_id] - ) - if self._destination not in hosts: - continue - new_pdus.append(p) + # Filter out events where the server is not in the room, + # e.g. it may have left/been kicked. *Ideally* we'd pull + # out the kick and send that, but it's a rare edge case + # so we don't bother for now (the server that sent the + # kick should send it out if its online). + new_pdus = await filter_events_for_server( + self._storage_controllers, + self._destination, + new_pdus, + redact=False, + ) + # If we've filtered out all the extremities, fall back to # sending the original event. 
This should ensure that the # server gets at least some of missed events (especially if diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 7dfb890661..f7884bfbe0 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -24,7 +24,6 @@ from typing import ( Union, ) -from matrix_common.versionstring import get_distribution_version_string from typing_extensions import Literal from synapse.api.constants import EduTypes @@ -42,6 +41,7 @@ from synapse.http.servlet import ( parse_strings_from_args, ) from synapse.types import JsonDict +from synapse.util import SYNAPSE_VERSION from synapse.util.ratelimitutils import FederationRateLimiter if TYPE_CHECKING: @@ -622,7 +622,7 @@ class FederationVersionServlet(BaseFederationServlet): { "server": { "name": "Synapse", - "version": get_distribution_version_string("matrix-synapse"), + "version": SYNAPSE_VERSION, } }, ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index fbafbbee6b..60d13040a2 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -81,6 +81,8 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +INVALID_USERNAME_OR_PASSWORD = "Invalid username or password" + def convert_client_dict_legacy_fields_to_identifier( submission: JsonDict, @@ -197,6 +199,7 @@ class AuthHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.checkers: Dict[str, UserInteractiveAuthChecker] = {} for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: @@ -983,7 +986,7 @@ class AuthHandler: not is_appservice_ghost or self.hs.config.appservice.track_appservice_user_ips ): - await self.auth.check_auth_blocking(user_id) + await self.auth_blocking.check_auth_blocking(user_id) access_token = self.generate_access_token(target_user_id_obj) await self.store.add_access_token_to_user( @@ -1215,7 +1218,9 @@ class AuthHandler: await self._failed_login_attempts_ratelimiter.can_do_action( None, (medium, address) ) - raise LoginError(403, "", errcode=Codes.FORBIDDEN) + raise LoginError( + 403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN + ) identifier_dict = {"type": "m.id.user", "user": user_id} @@ -1341,7 +1346,7 @@ class AuthHandler: # We raise a 403 here, but note that if we're doing user-interactive # login, it turns all LoginErrors into a 401 anyway. - raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN) + raise LoginError(403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN) async def check_password_provider_3pid( self, medium: str, address: str, password: str @@ -1435,7 +1440,7 @@ class AuthHandler: except Exception: raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN) - await self.auth.check_auth_blocking(res.user_id) + await self.auth_blocking.check_auth_blocking(res.user_id) return res async def delete_access_token(self, access_token: str) -> None: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index a0cbeedc30..b79c551703 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -398,35 +398,6 @@ class DeviceHandler(DeviceWorkerHandler): await self.delete_devices(user_id, user_devices) @trace - async def delete_device(self, user_id: str, device_id: str) -> None: - """Delete the given device - - Args: - user_id: The user to delete the device from. - device_id: The device to delete. 
- """ - - try: - await self.store.delete_device(user_id, device_id) - except errors.StoreError as e: - if e.code == 404: - # no match - set_tag("error", True) - log_kv( - {"reason": "User doesn't have device id.", "device_id": device_id} - ) - else: - raise - - await self._auth_handler.delete_access_tokens_for_user( - user_id, device_id=device_id - ) - - await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id) - - await self.notify_device_update(user_id, [device_id]) - - @trace async def delete_all_devices_for_user( self, user_id: str, except_device_id: Optional[str] = None ) -> None: @@ -591,7 +562,7 @@ class DeviceHandler(DeviceWorkerHandler): user_id, device_id, device_data ) if old_device_id is not None: - await self.delete_device(user_id, old_device_id) + await self.delete_devices(user_id, [old_device_id]) return device_id async def get_dehydrated_device( @@ -638,7 +609,7 @@ class DeviceHandler(DeviceWorkerHandler): await self.store.update_device(user_id, device_id, old_device["display_name"]) # can't call self.delete_device because that will clobber the # access token so call the storage layer directly - await self.store.delete_device(user_id, old_device_id) + await self.store.delete_devices(user_id, [old_device_id]) await self.store.delete_e2e_keys_by_device( user_id=user_id, device_id=old_device_id ) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1459a046de..8b0f16f965 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -28,6 +28,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.appservice import ApplicationService +from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.directory import RoomAliasMapping from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id @@ -141,10 +142,15 @@ class DirectoryHandler: 403, "You must be in the room to create an alias for it" ) - if not await self.spam_checker.user_may_create_room_alias( + spam_check = await self.spam_checker.user_may_create_room_alias( user_id, room_alias - ): - raise AuthError(403, "This user is not permitted to create this alias") + ) + if spam_check != self.spam_checker.NOT_SPAM: + raise AuthError( + 403, + "This user is not permitted to create this alias", + spam_check, + ) if not self.config.roomdirectory.is_alias_creation_allowed( user_id, room_id, room_alias_str @@ -430,9 +436,12 @@ class DirectoryHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_publish_room(user_id, room_id): + spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id) + if spam_check != NOT_SPAM: raise AuthError( - 403, "This user is not permitted to publish rooms to the room list" + 403, + "This user is not permitted to publish rooms to the room list", + spam_check, ) if requester.is_guest: diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 6bed464351..ed4149bd58 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -48,14 +48,13 @@ class EventAuthHandler: async def check_auth_rules_from_context( self, - room_version_obj: RoomVersion, event: EventBase, context: EventContext, ) -> None: """Check an event passes the auth rules at its own auth events""" auth_event_ids = event.auth_event_ids() auth_events_by_id = await self._store.get_events(auth_event_ids) - check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values()) + check_auth_rules_for_event(event, 
auth_events_by_id.values()) def compute_auth_events( self, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6a143440d3..34cc5ecd11 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,6 +59,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, @@ -545,6 +546,7 @@ class FederationHandler: if ret.partial_state: # TODO(faster_joins): roll this back if we don't manage to start the # background resync (eg process_remote_join fails) + # https://github.com/matrix-org/synapse/issues/12998 await self.store.store_partial_state_room(room_id, ret.servers_in_room) max_stream_id = await self._federation_event_handler.process_remote_join( @@ -799,9 +801,7 @@ class FederationHandler: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) return event async def on_invite_request( @@ -821,11 +821,14 @@ class FederationHandler: if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( event.sender, event.state_key, event.room_id - ): + ) + if spam_check != NOT_SPAM: raise SynapseError( - 403, "This user is not permitted to send invites to this server/user" + 403, + "This user is not permitted to send invites to this server/user", + spam_check, ) membership = event.content.get("membership") @@ -972,9 +975,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) raise e @@ -1033,9 +1034,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_knock_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new knock %r because %s", event, e) raise e @@ -1206,9 +1205,9 @@ class FederationHandler: event.internal_metadata.send_on_behalf_of = self.hs.hostname try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) @@ -1258,10 +1257,8 @@ class FederationHandler: ) try: - validate_event_for_room_version(room_version_obj, event) - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + validate_event_for_room_version(event) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e @@ -1506,14 +1503,17 @@ class FederationHandler: # TODO(faster_joins): do we need to lock to avoid races? What happens if other # worker processes kick off a resync in parallel? Perhaps we should just elect # a single worker to do the resync. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): what happens if we leave the room during a resync? if we # really leave, that might mean we have difficulty getting the room state over # federation. + # https://github.com/matrix-org/synapse/issues/12802 # # TODO(faster_joins): we need some way of prioritising which homeservers in # `other_destinations` to try first, otherwise we'll spend ages trying dead # homeservers for large rooms. + # https://github.com/matrix-org/synapse/issues/12999 if initial_destination is None and len(other_destinations) == 0: raise ValueError( @@ -1543,9 +1543,11 @@ class FederationHandler: # all the events are updated, so we can update current state and # clear the lazy-loading flag. logger.info("Updating current state for %s", room_id) + # TODO(faster_joins): support workers + # https://github.com/matrix-org/synapse/issues/12994 assert ( self._storage_controllers.persistence is not None - ), "TODO(faster_joins): support for workers" + ), "worker-mode deployments not currently supported here" await self._storage_controllers.persistence.update_current_state( room_id ) @@ -1559,6 +1561,8 @@ class FederationHandler: ) # TODO(faster_joins) update room stats and user directory? + # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 return # we raced against more events arriving with partial state. Go round @@ -1566,6 +1570,8 @@ class FederationHandler: # TODO(faster_joins): there is still a race here, whereby incoming events which raced # with us will fail to be persisted after the call to `clear_partial_state_room` due to # having partial state. + # https://github.com/matrix-org/synapse/issues/12988 + # continue events = await self.store.get_events_as_list( @@ -1588,6 +1594,7 @@ class FederationHandler: # indefinitely is also not the right thing to do if we can # reach all homeservers and they all claim they don't have # the state we want. 
+ # https://github.com/matrix-org/synapse/issues/13000 logger.error( "Failed to get state for %s at %s from %s because %s, " "giving up!", diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 87a0608359..6c9e6a00b5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -532,6 +532,7 @@ class FederationEventHandler: # # TODO(faster_joins): we probably need to be more intelligent, and # exclude partial-state prev_events from consideration + # https://github.com/matrix-org/synapse/issues/13001 logger.warning( "%s still has partial state: can't de-partial-state it yet", event.event_id, @@ -777,6 +778,7 @@ class FederationEventHandler: state_ids = await self._resolve_state_at_missing_prevs(origin, event) # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does # not return partial state + # https://github.com/matrix-org/synapse/issues/13002 await self._process_received_pdu( origin, event, state_ids=state_ids, backfilled=backfilled @@ -1428,9 +1430,6 @@ class FederationEventHandler: allow_rejected=True, ) - room_version = await self._store.get_room_version_id(room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: with nested_logging_context(suffix=event.event_id): auth = [] @@ -1453,8 +1452,8 @@ class FederationEventHandler: context = EventContext.for_outlier(self._storage_controllers) try: - validate_event_for_room_version(room_version_obj, event) - check_auth_rules_for_event(room_version_obj, event, auth) + validate_event_for_room_version(event) + check_auth_rules_for_event(event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1497,11 +1496,8 @@ class FederationEventHandler: assert not event.internal_metadata.outlier # first of all, check that the event itself is valid. - room_version = await self._store.get_room_version_id(event.room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) except AuthError as e: logger.warning("While validating received event %r: %s", event, e) # TODO: use a different rejected reason here? @@ -1519,7 +1515,7 @@ class FederationEventHandler: # ... and check that the event passes auth at those auth events. 
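The pattern repeated across these hunks is that `validate_event_for_room_version` and `check_auth_rules_for_event` drop their `room_version_obj` parameter; judging by the call-site deletions, the room version is now derived from the event itself, so callers no longer have to look it up. A minimal sketch of that call shape (simplified stand-in types, not the real Synapse signatures):

```python
# Simplified stand-ins showing the new call shape: the room version is read
# off the event instead of being passed in by every caller.
from dataclasses import dataclass
from typing import Iterable

@dataclass
class RoomVersion:
    identifier: str

@dataclass
class Event:
    event_id: str
    room_version: RoomVersion  # an event knows the version of its room

def check_auth_rules_for_event(event: Event, auth_events: Iterable[Event]) -> None:
    # Old shape: check_auth_rules_for_event(room_version_obj, event, auth_events).
    # New shape: the version comes from the event itself.
    room_version = event.room_version
    ...  # apply the auth rules for `room_version` to `event` against auth_events
```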
try: - check_auth_rules_for_event(room_version_obj, event, claimed_auth_events) + check_auth_rules_for_event(event, claimed_auth_events) except AuthError as e: logger.warning( "While checking auth of %r against auth_events: %s", event, e @@ -1567,9 +1563,7 @@ class FederationEventHandler: auth_events_for_auth = calculated_auth_event_map try: - check_auth_rules_for_event( - room_version_obj, event, auth_events_for_auth.values() - ) + check_auth_rules_for_event(event, auth_events_for_auth.values()) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1669,7 +1663,7 @@ class FederationEventHandler: ) try: - check_auth_rules_for_event(room_version_obj, event, current_auth_events) + check_auth_rules_for_event(event, current_auth_events) except AuthError as e: logger.warning( "Soft-failing %r (from %s) because %s", diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f455158a2c..189f52fe5a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -42,7 +42,7 @@ from synapse.api.errors import ( SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.urls import ConsentURIBuilder from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase, relation_from_event @@ -444,7 +444,7 @@ _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000 class EventCreationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._event_auth_handler = hs.get_event_auth_handler() self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -605,7 +605,7 @@ class EventCreationHandler: Returns: Tuple of created event, Context """ - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": room_version_id = event_dict["content"]["room_version"] @@ -954,14 +954,12 @@ class EventCreationHandler: "Spam-check module returned invalid error value. Expecting [code, dict], got %s", spam_check_result, ) - spam_check_result = Codes.FORBIDDEN - if isinstance(spam_check_result, Codes): - raise SynapseError( - 403, - "This message has been rejected as probable spam", - spam_check_result, - ) + raise SynapseError( + 403, + "This message has been rejected as probable spam", + Codes.FORBIDDEN, + ) # Backwards compatibility: if the return value is not an error code, it # means the module returned an error message to be included in the @@ -1102,6 +1100,7 @@ class EventCreationHandler: # # TODO(faster_joins): figure out how this works, and make sure that the # old state is complete. 
+ # https://github.com/matrix-org/synapse/issues/13003 metadata = await self.store.get_metadata_for_events(state_event_ids) state_map_for_event: MutableStateMap[str] = {} @@ -1273,23 +1272,6 @@ class EventCreationHandler: ) return prev_event - if event.is_state() and (event.type, event.state_key) == ( - EventTypes.Create, - "", - ): - room_version_id = event.content.get( - "room_version", RoomVersions.V1.identifier - ) - maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) - if not maybe_room_version_obj: - raise UnsupportedRoomVersionError( - "Attempt to create a room with unsupported room version %s" - % (room_version_id,) - ) - room_version_obj = maybe_room_version_obj - else: - room_version_obj = await self.store.get_room_version(event.room_id) - if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here are # invite rejections and rescinded knocks that we have generated ourselves. @@ -1297,9 +1279,9 @@ class EventCreationHandler: assert event.content["membership"] == Membership.LEAVE else: try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as err: logger.warning("Denying new event %r because %s", event, err) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 338204287f..c77d181722 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -91,6 +91,7 @@ class RegistrationHandler: self.clock = hs.get_clock() self.hs = hs self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() @@ -276,7 +277,7 @@ class RegistrationHandler: # do not check_auth_blocking if the call is coming through the Admin API if not by_admin: - await self.auth.check_auth_blocking(threepid=threepid) + await self.auth_blocking.check_auth_blocking(threepid=threepid) if localpart is not None: await self.check_username(localpart, guest_access_token=guest_access_token) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 520663f172..75c0be8c36 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -62,6 +62,7 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.handlers.relations import BundledAggregations +from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.state import StateFilter from synapse.streams import EventSource @@ -109,6 +110,7 @@ class RoomCreationHandler: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.hs = hs self.spam_checker = hs.get_spam_checker() @@ -226,10 +228,9 @@ class RoomCreationHandler: }, }, ) - old_room_version = await self.store.get_room_version(old_room_id) - validate_event_for_room_version(old_room_version, tombstone_event) + validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( - old_room_version, tombstone_event, tombstone_context + 
tombstone_event, tombstone_context ) # Upgrade the room @@ -437,10 +438,9 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_create_room(user_id): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError(403, "You are not permitted to create rooms", spam_check) creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -707,7 +707,7 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if ( self._server_notices_mxid is not None @@ -727,12 +727,12 @@ class RoomCreationHandler: invite_3pid_list = config.get("invite_3pid", []) invite_list = config.get("invite", []) - if not is_requester_admin and not ( - await self.spam_checker.user_may_create_room(user_id) - ): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + if not is_requester_admin: + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError( + 403, "You are not permitted to create rooms", spam_check + ) if ratelimit: await self.request_ratelimiter.ratelimit(requester) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d1199a0644..e89b7441ad 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -38,6 +38,7 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.module_api import NOT_SPAM from synapse.storage.state import StateFilter from synapse.types import ( JsonDict, @@ -683,7 +684,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if target_id == self._server_notices_mxid: raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") - block_invite = False + block_invite_code = None if ( self._server_notices_mxid is not None @@ -701,16 +702,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): "Blocking invite: user is not admin and non-admin " "invites disabled" ) - block_invite = True + block_invite_code = Codes.FORBIDDEN - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( requester.user.to_string(), target_id, room_id - ): + ) + if spam_check != NOT_SPAM: logger.info("Blocking invite due to spam checker") - block_invite = True + block_invite_code = spam_check - if block_invite: - raise SynapseError(403, "Invites have been disabled on this server") + if block_invite_code is not None: + raise SynapseError( + 403, "Invites have been disabled on this server", block_invite_code + ) # An empty prev_events list is allowed as long as the auth_event_ids are present if prev_event_ids is not None: @@ -818,11 +822,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # We assume that if the spam checker allowed the user to create # a room then they're allowed to join it. 
and not new_room - and not await self.spam_checker.user_may_join_room( + ): + spam_check = await self.spam_checker.user_may_join_room( target.to_string(), room_id, is_invited=inviter is not None ) - ): - raise SynapseError(403, "Not allowed to join this room") + if spam_check != NOT_SPAM: + raise SynapseError(403, "Not allowed to join this room", spam_check) # Check if a remote join should be performed. remote_join, remote_room_hosts = await self._should_perform_remote_join( @@ -1369,13 +1374,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) else: # Check if the spamchecker(s) allow this invite to go through. - if not await self.spam_checker.user_may_send_3pid_invite( + spam_check = await self.spam_checker.user_may_send_3pid_invite( inviter_userid=requester.user.to_string(), medium=medium, address=address, room_id=room_id, - ): - raise SynapseError(403, "Cannot send threepid invite") + ) + if spam_check != NOT_SPAM: + raise SynapseError(403, "Cannot send threepid invite", spam_check) stream_id = await self._make_and_store_3pid_invite( requester, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b4ead79f97..af19c513be 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -237,7 +237,7 @@ class SyncHandler: self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -280,7 +280,7 @@ class SyncHandler: # not been exceeded (if not part of the group by this point, almost certain # auth_blocking will occur) user_id = sync_config.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) res = await self.response_cache.wrap( sync_config.request_key, diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index fffd83546c..496fce2ecc 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -35,7 +35,6 @@ from typing import ( ) import attr -from matrix_common.versionstring import get_distribution_version_string from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric from prometheus_client.core import ( REGISTRY, @@ -54,6 +53,7 @@ from synapse.metrics._exposition import ( ) from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._types import Collector +from synapse.util import SYNAPSE_VERSION logger = logging.getLogger(__name__) @@ -419,7 +419,7 @@ build_info = Gauge( ) build_info.labels( " ".join([platform.python_implementation(), platform.python_version()]), - get_distribution_version_string("matrix-synapse"), + SYNAPSE_VERSION, " ".join([platform.system(), platform.release()]), ).set(1) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a8ad575fcd..6191c2dc96 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -115,6 +115,7 @@ from synapse.types import ( JsonDict, JsonMapping, Requester, + RoomAlias, StateMap, UserID, UserInfo, @@ -163,6 +164,7 @@ __all__ = [ "EventBase", "StateMap", "ProfileInfo", + "RoomAlias", "UserProfile", ] @@ -799,7 +801,7 @@ class ModuleApi: if device_id: # delete the device, which will also delete its access tokens yield defer.ensureDeferred( - self._hs.get_device_handler().delete_device(user_id, device_id) + 
self._hs.get_device_handler().delete_devices(user_id, [device_id]) ) else: # no associated device. Just delete the access token. diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 1aa08f8d95..fa3266720b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -20,8 +20,6 @@ import platform from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple -from matrix_common.versionstring import get_distribution_version_string - from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -88,6 +86,7 @@ from synapse.rest.admin.users import ( WhoisRestServlet, ) from synapse.types import JsonDict, RoomStreamToken +from synapse.util import SYNAPSE_VERSION if TYPE_CHECKING: from synapse.server import HomeServer @@ -100,7 +99,7 @@ class VersionServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.res = { - "server_version": get_distribution_version_string("matrix-synapse"), + "server_version": SYNAPSE_VERSION, "python_version": platform.python_version(), } diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index cef46ba0dd..d934880102 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -80,7 +80,7 @@ class DeviceRestServlet(RestServlet): if u is None: raise NotFoundError("Unknown user") - await self.device_handler.delete_device(target_user.to_string(), device_id) + await self.device_handler.delete_devices(target_user.to_string(), [device_id]) return HTTPStatus.OK, {} async def on_PUT( diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index ad6fd6492b..6fab102437 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -147,7 +147,9 @@ class DeviceRestServlet(RestServlet): can_skip_ui_auth=True, ) - await self.device_handler.delete_device(requester.user.to_string(), device_id) + await self.device_handler.delete_devices( + requester.user.to_string(), [device_id] + ) return 200, {} async def on_PUT( diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 193a6951b9..23dfa4518f 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -45,8 +45,8 @@ class LogoutRestServlet(RestServlet): access_token = self.auth.get_access_token_from_request(request) await self._auth_handler.delete_access_token(access_token) else: - await self._device_handler.delete_device( - requester.user.to_string(), requester.device_id + await self._device_handler.delete_devices( + requester.user.to_string(), [requester.device_id] ) return 200, {} diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 3cae6d2b55..ce97080013 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -43,6 +43,7 @@ class RelationPaginationServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main self._relations_handler = hs.get_relations_handler() + self._msc3715_enabled = hs.config.experimental.msc3715_enabled async def on_GET( self, @@ -55,9 +56,15 @@ class RelationPaginationServlet(RestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=True) limit = parse_integer(request, "limit", default=5) - direction = parse_string( - request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] - ) + if self._msc3715_enabled: + direction = parse_string( + 
request, + "org.matrix.msc3715.dir", + default="b", + allowed_values=["f", "b"], + ) + else: + direction = "b" from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 604f18bf52..9137417342 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -36,6 +36,7 @@ from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender +import synapse from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util import Clock @@ -145,15 +146,15 @@ class MediaStorage: f.flush() f.close() - spam = await self.spam_checker.check_media_file_for_spam( + spam_check = await self.spam_checker.check_media_file_for_spam( ReadableFileWrapper(self.clock, fname), file_info ) - if spam: + if spam_check != synapse.module_api.NOT_SPAM: logger.info("Blocking media due to spam checker") # Note that we'll delete the stored media, due to the # try/except below. The media also won't be stored in # the DB. - raise SpamMediaException() + raise SpamMediaException(errcode=spam_check) for provider in self.storage_providers: await provider.store_file(path, file_info) diff --git a/synapse/server.py b/synapse/server.py index a66ec228db..a6a415aeab 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -29,6 +29,7 @@ from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter from synapse.appservice.api import ApplicationServiceApi @@ -380,6 +381,10 @@ class HomeServer(metaclass=abc.ABCMeta): return Auth(self) @cache_in_self + def get_auth_blocking(self) -> AuthBlocking: + return AuthBlocking(self) + + @cache_in_self def get_http_client_context_factory(self) -> IPolicyForHTTPS: if self.config.tls.use_insecure_ssl_client_just_for_testing_do_not_use: return InsecureInterceptableContextFactory() diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 6863020778..3134cd2d3d 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -37,7 +37,7 @@ class ResourceLimitsServerNotices: self._server_notices_manager = hs.get_server_notices_manager() self._store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() - self._auth = hs.get_auth() + self._auth_blocking = hs.get_auth_blocking() self._config = hs.config self._resouce_limited = False self._account_data_handler = hs.get_account_data_handler() @@ -91,7 +91,7 @@ class ResourceLimitsServerNotices: # Normally should always pass in user_id to check_auth_blocking # if you have it, but in this case are checking what would happen # to other users if they were to arrive. 
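Several handlers in this diff (directory, federation, room, room_member, plus media_storage above) converge on one spam-checker convention: a callback returns `NOT_SPAM` to allow the action, or an error code that Synapse passes through as the errcode of the resulting 403. A sketch of a module callback under that contract, assuming the module API exposes `Codes` as the spam-checker docs describe; the denylist policy is invented for illustration:

```python
# Sketch of a spam-checker module callback under the NOT_SPAM convention.
# The denylist policy is invented for illustration.
from synapse.module_api import NOT_SPAM
from synapse.module_api.errors import Codes

DENYLIST = {"@spammer:example.org"}  # illustrative only

async def user_may_create_room(user_id: str):
    if user_id in DENYLIST:
        # The returned code becomes the errcode of the 403 Synapse raises.
        return Codes.FORBIDDEN
    return NOT_SPAM
```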
- await self._auth.check_auth_blocking() + await self._auth_blocking.check_auth_blocking() except ResourceLimitError as e: limit_msg = e.msg limit_type = e.limit_type diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 499a328201..8bbb4ce41c 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,7 +30,7 @@ from typing import ( from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap @@ -331,7 +331,6 @@ def _resolve_auth_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, auth_events.values(), ) @@ -349,7 +348,6 @@ def _resolve_normal_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, auth_events.values(), ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index c618df2fde..6a16f38a15 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -17,12 +17,14 @@ import itertools import logging from typing import ( Any, + Awaitable, Callable, Collection, Dict, Generator, Iterable, List, + Mapping, Optional, Sequence, Set, @@ -30,33 +32,58 @@ from typing import ( overload, ) -from typing_extensions import Literal +from typing_extensions import Literal, Protocol -import synapse.state from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap -from synapse.util import Clock logger = logging.getLogger(__name__) +class Clock(Protocol): + # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests. + # We only ever sleep(0) though, so that other async functions can make forward + # progress without waiting for stateres to complete. + def sleep(self, duration_ms: float) -> Awaitable[None]: + ... + + +class StateResolutionStore(Protocol): + # This is usually synapse.state.StateResolutionStore, but it's replaced with a + # TestStateResolutionStore in tests. + def get_events( + self, event_ids: Collection[str], allow_rejected: bool = False + ) -> Awaitable[Dict[str, EventBase]]: + ... + + def get_auth_chain_difference( + self, room_id: str, state_sets: List[Set[str]] + ) -> Awaitable[Set[str]]: + ... + + # We want to await to the reactor occasionally during state res when dealing # with large data sets, so that we don't exhaust the reactor. This is done by # awaiting to reactor during loops every N iterations. 
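The new `Clock` and `StateResolutionStore` classes above are `Protocol`s, i.e. structural types: anything with matching method signatures is accepted, which is how the tests substitute fakes without importing the real implementations. A self-contained illustration of the same mechanism:

```python
# Self-contained illustration of structural typing with Protocol: FakeClock
# satisfies Clock purely by shape, with no inheritance relationship.
from typing import Awaitable, Protocol

class Clock(Protocol):
    def sleep(self, duration_ms: float) -> Awaitable[None]: ...

class FakeClock:
    async def sleep(self, duration_ms: float) -> None:
        return None  # tests never actually wait

async def yield_to_reactor(clock: Clock) -> None:
    # sleep(0) is enough to let other tasks make progress, matching the
    # comment above about only ever sleeping for zero during state res.
    await clock.sleep(0)

# FakeClock is accepted wherever a Clock is expected.
```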
_AWAIT_AFTER_ITERATIONS = 100 +__all__ = [ + "resolve_events_with_store", +] + + async def resolve_events_with_store( clock: Clock, room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm @@ -194,7 +221,7 @@ async def _get_power_level_for_sender( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Return the power level of the sender of the given event according to their auth events. @@ -243,9 +270,9 @@ async def _get_power_level_for_sender( async def _get_auth_chain_difference( room_id: str, - state_sets: Sequence[StateMap[str]], + state_sets: Sequence[Mapping[Any, str]], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> Set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some but not all of the auth chains. @@ -406,7 +433,7 @@ async def _add_event_and_auth_chain_to_graph( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> None: """Helper function for _reverse_topological_power_sort that add the event @@ -440,7 +467,7 @@ async def _reverse_topological_power_sort( room_id: str, event_ids: Iterable[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> List[str]: """Returns a list of the event_ids sorted by reverse topological ordering, @@ -501,7 +528,7 @@ async def _iterative_auth_checks( event_ids: List[str], base_state: StateMap[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> MutableStateMap[str]: """Sequentially apply auth checks to each event in given list, updating the state as it goes along. @@ -547,7 +574,6 @@ async def _iterative_auth_checks( try: event_auth.check_auth_rules_for_event( - room_version, event, auth_events.values(), ) @@ -570,7 +596,7 @@ async def _mainline_sort( event_ids: List[str], resolved_power_event_id: Optional[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> List[str]: """Returns a sorted list of event_ids sorted by mainline ordering based on the given event resolved_power_event_id @@ -639,7 +665,7 @@ async def _get_mainline_depth_for_event( event: EventBase, mainline_map: Dict[str, int], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Get the mainline depths for the given event based on the mainline map @@ -683,7 +709,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[False] = False, ) -> EventBase: ... 
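The `_get_event` overloads here, continuing in the next hunk, use `Literal` so a type checker knows that `allow_none=False` always yields an `EventBase` while `allow_none=True` may yield `None`. The same pattern in miniature:

```python
# The overload/Literal pattern in miniature: the declared return type
# depends on the value of allow_none.
from typing import Literal, Optional, overload

@overload
def find(key: str, allow_none: Literal[False] = False) -> str: ...
@overload
def find(key: str, allow_none: Literal[True]) -> Optional[str]: ...

def find(key: str, allow_none: bool = False) -> Optional[str]:
    value = {"a": "1"}.get(key)
    if value is None and not allow_none:
        raise KeyError(key)
    return value

hit: str = find("a")                               # checker knows this is str
miss: Optional[str] = find("b", allow_none=True)   # may be None
```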
@@ -694,7 +720,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[True], ) -> Optional[EventBase]: ... @@ -704,7 +730,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: bool = False, ) -> Optional[EventBase]: """Helper function to look up event in event_map, falling back to looking diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index b1e5208c76..555b4e77d2 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -507,25 +507,6 @@ class BackgroundUpdater: update_handler ) - def register_noop_background_update(self, update_name: str) -> None: - """Register a noop handler for a background update. - - This is useful when we previously did a background update, but no - longer wish to do the update. In this case the background update should - be removed from the schema delta files, but there may still be some - users who have the background update queued, so this method should - also be called to clear the update. - - Args: - update_name: Name of update - """ - - async def noop_update(progress: JsonDict, batch_size: int) -> int: - await self._end_background_update(update_name) - return 1 - - self.register_background_update_handler(update_name, noop_update) - def register_background_index_update( self, update_name: str, diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 4caaa81808..4bcb99d06e 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -388,10 +388,13 @@ class EventsPersistenceStorageController: # TODO(faster_joins): get a real stream ordering, to make this work correctly # across workers. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): this can race against event persistence, in which case we # will end up with incorrect state. Perhaps we should make this a job we - # farm out to the event persister, somehow. + # farm out to the event persister thread, somehow. + # https://github.com/matrix-org/synapse/issues/13007 + # stream_id = self.main_store.get_room_max_stream_ordering() await self.persist_events_store.update_current_state(room_id, delta, stream_id) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 3b4cdb67eb..d3a44bc876 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -452,6 +452,9 @@ class StateStorageController: up to date. """ # FIXME(faster_joins): what do we do here? 
+ # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 + # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( prev_stream_id, max_stream_id diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 11d9d16c19..9121badb3a 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -45,7 +45,6 @@ from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore from .filtering import FilteringStore -from .group_server import GroupServerStore from .keys import KeyStore from .lock import LockStore from .media_repository import MediaRepositoryStore @@ -117,7 +116,6 @@ class DataStore( DeviceStore, DeviceInboxStore, UserDirectoryStore, - GroupServerStore, UserErasureStore, MonthlyActiveUsersWorkerStore, StatsStore, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 599b418383..422e0e65ca 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -834,8 +834,6 @@ class DeviceInboxWorkerStore(SQLBaseStore): class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - REMOVE_DELETED_DEVICES = "remove_deleted_devices_from_device_inbox" - REMOVE_HIDDEN_DEVICES = "remove_hidden_devices_from_device_inbox" REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox" def __init__( @@ -857,15 +855,6 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox ) - # Used to be a background update that deletes all device_inboxes for deleted - # devices. - self.db_pool.updates.register_noop_background_update( - self.REMOVE_DELETED_DEVICES - ) - # Used to be a background update that deletes all device_inboxes for hidden - # devices. - self.db_pool.updates.register_noop_background_update(self.REMOVE_HIDDEN_DEVICES) - self.db_pool.updates.register_background_update_handler( self.REMOVE_DEAD_DEVICES_FROM_INBOX, self._remove_dead_devices_from_device_inbox, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d900064c07..2414a7dc38 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1240,15 +1240,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): self._remove_duplicate_outbound_pokes, ) - # a pair of background updates that were added during the 1.14 release cycle, - # but replaced with 58/06dlols_unique_idx.py - self.db_pool.updates.register_noop_background_update( - "device_lists_outbound_last_success_unique_idx", - ) - self.db_pool.updates.register_noop_background_update( - "drop_device_lists_outbound_last_success_non_unique_idx", - ) - async def _drop_device_list_streams_non_unique_indexes( self, progress: JsonDict, batch_size: int ) -> int: @@ -1433,16 +1424,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): ) raise StoreError(500, "Problem storing device.") - async def delete_device(self, user_id: str, device_id: str) -> None: - """Delete a device and its device_inbox. 
- - Args: - user_id: The ID of the user which owns the device - device_id: The ID of the device to delete - """ - - await self.delete_devices(user_id, [device_id]) - async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: """Deletes several devices. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 17e35cf63e..a8773374be 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -46,7 +46,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventCacheEntry from synapse.storage.databases.main.search import SearchEntry -from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator from synapse.types import JsonDict, StateMap, get_domain_from_id diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index d5f0059665..bea34a4c4a 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -177,11 +177,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): self._purged_chain_cover_index, ) - # The event_thread_relation background update was replaced with the - # event_arbitrary_relations one, which handles any relation to avoid - # needed to potentially crawl the entire events table in the future. - self.db_pool.updates.register_noop_background_update("event_thread_relation") - self.db_pool.updates.register_background_update_handler( "event_arbitrary_relations", self._event_arbitrary_relations, diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py deleted file mode 100644 index c15a7136b6..0000000000 --- a/synapse/storage/databases/main/group_server.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class GroupServerStore(SQLBaseStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - # Register a legacy groups background update as a no-op. 
- database.updates.register_noop_background_update("local_group_updates_index") - super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index d028be16de..9b172a64d8 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -37,9 +37,6 @@ from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer -BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( - "media_repository_drop_index_wo_method" -) BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( "media_repository_drop_index_wo_method_2" ) @@ -111,13 +108,6 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): unique=True, ) - # the original impl of _drop_media_index_without_method was broken (see - # https://github.com/matrix-org/synapse/issues/8649), so we replace the original - # impl with a no-op and run the fixed migration as - # media_repository_drop_index_wo_method_2. - self.db_pool.updates.register_noop_background_update( - BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD - ) self.db_pool.updates.register_background_update_handler( BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2, self._drop_media_index_without_method, diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 21e954ccc1..b6106affa6 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -36,6 +36,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.storage.engines._base import IsolationLevel from synapse.storage.util.id_generators import ( AbstractStreamIdTracker, MultiWriterIdGenerator, @@ -764,6 +765,10 @@ class ReceiptsWorkerStore(SQLBaseStore): linearized_event_id, data, stream_id=stream_id, + # Read committed is actually beneficial here because we check for a receipt with + # greater stream order, and checking the very latest data at select time is better + # than the data at transaction start time. + isolation_level=IsolationLevel.READ_COMMITTED, ) # If the receipt was older than the currently persisted one, nothing to do. diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 4991360b70..cb63cd9b7d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1805,21 +1805,10 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): columns=["creation_ts"], ) - # we no longer use refresh tokens, but it's possible that some people - # might have a background update queued to build this index. Just - # clear the background update. 
- self.db_pool.updates.register_noop_background_update( - "refresh_tokens_device_index" - ) - self.db_pool.updates.register_background_update_handler( "users_set_deactivated_flag", self._background_update_set_deactivated_flag ) - self.db_pool.updates.register_noop_background_update( - "user_threepids_grandfather" - ) - self.db_pool.updates.register_background_index_update( "user_external_ids_user_id_idx", index_name="user_external_ids_user_id_idx", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 68d4fc2e64..5760d3428e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1112,6 +1112,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): # this can race with incoming events, so we watch out for FK errors. # TODO(faster_joins): this still doesn't completely fix the race, since the persist process # is not atomic. I fear we need an application-level lock. + # https://github.com/matrix-org/synapse/issues/12988 try: await self.db_pool.runInteraction( "clear_partial_state_room", self._clear_partial_state_room_txn, room_id @@ -1119,6 +1120,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): return True except self.db_pool.engine.module.DatabaseError as e: # TODO(faster_joins): how do we distinguish between FK errors and other errors? + # https://github.com/matrix-org/synapse/issues/12988 logger.warning( "Exception while clearing lazy partial-state-room %s, retrying: %s", room_id, diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 78e0773b2a..f6e24b68d2 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -113,7 +113,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_UPDATE_NAME = "event_search" EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" - EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" EVENT_SEARCH_DELETE_NON_STRINGS = "event_search_sqlite_delete_non_strings" @@ -132,15 +131,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order ) - # we used to have a background update to turn the GIN index into a - # GIST one; we no longer do that (obviously) because we actually want - # a GIN index. However, it's possible that some people might still have - # the background update queued, so we register a handler to clear the - # background update. - self.db_pool.updates.register_noop_background_update( - self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME - ) - self.db_pool.updates.register_background_update_handler( self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search ) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index bdd00273cd..9674c4a757 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -127,13 +127,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): NotFoundError: if the room is unknown """ - # First we try looking up room version from the database, but for old - # rooms we might not have added the room version to it yet so we fall - # back to previous behaviour and look in current state events. 
- # # We really should have an entry in the rooms table for every room we - # care about, but let's be a bit paranoid (at least while the background - # update is happening) to avoid breaking existing rooms. + # care about, but let's be a bit paranoid. room_version = self.db_pool.simple_select_one_onecol_txn( txn, table="rooms", @@ -440,6 +435,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # TODO(faster_joins): need to do something about workers here + # https://github.com/matrix-org/synapse/issues/12994 txn.call_after(self.is_partial_state_event.invalidate, (event.event_id,)) txn.call_after( self._get_state_group_for_event.prefill, diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index b95dbef678..538451b05f 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -120,11 +120,6 @@ class StatsStore(StateDeltasStore): self.db_pool.updates.register_background_update_handler( "populate_stats_process_users", self._populate_stats_process_users ) - # we no longer need to perform clean-up, but we will give ourselves - # the potential to reintroduce it in the future – so documentation - # will still encourage the use of this no-op handler. - self.db_pool.updates.register_noop_background_update("populate_stats_cleanup") - self.db_pool.updates.register_noop_background_update("populate_stats_prepare") async def _populate_stats_process_users( self, progress: JsonDict, batch_size: int diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index f51b3d228e..a182e8a098 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -11,11 +11,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Mapping +from typing import Any, Mapping, NoReturn from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup -from .postgres import PostgresEngine -from .sqlite import Sqlite3Engine + +# The classes `PostgresEngine` and `Sqlite3Engine` must always be importable, because +# we use `isinstance(engine, PostgresEngine)` to write different queries for postgres +# and sqlite. But the database driver modules are both optional: they may not be +# installed. To account for this, create dummy classes on import failure so we can +# still run `isinstance()` checks. 
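The `try`/`except ImportError` fallback that follows keeps `isinstance()` checks working even when a driver is missing, while failing loudly on any attempt to construct the engine. A standalone sketch of the same trick, using a deliberately nonexistent module name:

```python
# Standalone sketch: a placeholder class keeps isinstance() usable when an
# optional dependency is absent, but refuses to be instantiated.
# `missing_driver` is deliberately a module that does not exist.
from typing import NoReturn

class BaseDatabaseEngine:  # stand-in for Synapse's real base class
    pass

try:
    import missing_driver  # simulates the optional dependency being absent

    class PostgresEngine(BaseDatabaseEngine):
        pass

except ImportError:

    class PostgresEngine(BaseDatabaseEngine):  # type: ignore[no-redef]
        def __new__(cls, *args: object, **kwargs: object) -> NoReturn:
            raise RuntimeError(f"Cannot create {cls.__name__}: driver not installed")

engine = BaseDatabaseEngine()
print(isinstance(engine, PostgresEngine))  # False, but the check itself works
```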
+try: + from .postgres import PostgresEngine +except ImportError: + + class PostgresEngine(BaseDatabaseEngine): # type: ignore[no-redef] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + raise RuntimeError( + f"Cannot create {cls.__name__} -- psycopg2 module is not installed" + ) + + +try: + from .sqlite import Sqlite3Engine +except ImportError: + + class Sqlite3Engine(BaseDatabaseEngine): # type: ignore[no-redef] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + raise RuntimeError( + f"Cannot create {cls.__name__} -- sqlite3 module is not installed" + ) def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine: @@ -30,4 +54,10 @@ def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine: raise RuntimeError("Unsupported database engine '%s'" % (name,)) -__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"] +__all__ = [ + "create_engine", + "BaseDatabaseEngine", + "PostgresEngine", + "Sqlite3Engine", + "IncorrectDatabaseSetup", +] diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 391f8ed24a..517f9d5f98 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,6 +15,8 @@ import logging from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast +import psycopg2.extensions + from synapse.storage.engines._base import ( BaseDatabaseEngine, IncorrectDatabaseSetup, @@ -23,18 +25,14 @@ from synapse.storage.engines._base import ( from synapse.storage.types import Cursor if TYPE_CHECKING: - import psycopg2 # noqa: F401 - from synapse.storage.database import LoggingDatabaseConnection logger = logging.getLogger(__name__) -class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): +class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]): def __init__(self, database_config: Mapping[str, Any]): - import psycopg2.extensions - super().__init__(psycopg2, database_config) psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) @@ -69,7 +67,9 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return collation, ctype def check_database( - self, db_conn: "psycopg2.connection", allow_outdated_version: bool = False + self, + db_conn: psycopg2.extensions.connection, + allow_outdated_version: bool = False, ) -> None: # Get the version of PostgreSQL that we're using. 
As per the psycopg2 # docs: The number is formed by converting the major, minor, and @@ -176,8 +176,6 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return True def is_deadlock(self, error: Exception) -> bool: - import psycopg2.extensions - if isinstance(error, psycopg2.DatabaseError): # https://www.postgresql.org/docs/current/static/errcodes-appendix.html # "40001" serialization_failure @@ -185,7 +183,7 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): return error.pgcode in ["40001", "40P01"] return False - def is_connection_closed(self, conn: "psycopg2.connection") -> bool: + def is_connection_closed(self, conn: psycopg2.extensions.connection) -> bool: return bool(conn.closed) def lock_table(self, txn: Cursor, table: str) -> None: @@ -205,18 +203,16 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]): else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) - def in_transaction(self, conn: "psycopg2.connection") -> bool: - import psycopg2.extensions - + def in_transaction(self, conn: psycopg2.extensions.connection) -> bool: return conn.status != psycopg2.extensions.STATUS_READY def attempt_to_set_autocommit( - self, conn: "psycopg2.connection", autocommit: bool + self, conn: psycopg2.extensions.connection, autocommit: bool ) -> None: return conn.set_session(autocommit=autocommit) def attempt_to_set_isolation_level( - self, conn: "psycopg2.connection", isolation_level: Optional[int] + self, conn: psycopg2.extensions.connection, isolation_level: Optional[int] ) -> None: if isolation_level is None: isolation_level = self.default_isolation_level diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c33df42084..09a2b58f4c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -23,8 +23,7 @@ from typing_extensions import Counter as CounterType from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import BaseDatabaseEngine -from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION from synapse.storage.types import Cursor diff --git a/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql b/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql new file mode 100644 index 0000000000..fa96ac50c2 --- /dev/null +++ b/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql @@ -0,0 +1,61 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Clean-up background updates which should no longer be run. Previously these +-- used the (now removed) register_noop_background_update method. + +-- Used to be a background update that deletes all device_inboxes for deleted +-- devices. 
+DELETE FROM background_updates WHERE update_name = 'remove_deleted_devices_from_device_inbox'; +-- Used to be a background update that deletes all device_inboxes for hidden +-- devices. +DELETE FROM background_updates WHERE update_name = 'remove_hidden_devices_from_device_inbox'; + +-- A pair of background updates that were added during the 1.14 release cycle, +-- but replaced with 58/06dlols_unique_idx.py +DELETE FROM background_updates WHERE update_name = 'device_lists_outbound_last_success_unique_idx'; +DELETE FROM background_updates WHERE update_name = 'drop_device_lists_outbound_last_success_non_unique_idx'; + +-- The event_thread_relation background update was replaced with the +-- event_arbitrary_relations one, which handles any relation to avoid +-- needing to potentially crawl the entire events table in the future. +DELETE FROM background_updates WHERE update_name = 'event_thread_relation'; + +-- A legacy groups background update. +DELETE FROM background_updates WHERE update_name = 'local_group_updates_index'; + +-- The original impl of _drop_media_index_without_method was broken (see +-- https://github.com/matrix-org/synapse/issues/8649), so we replace the original +-- impl with a no-op and run the fixed migration as +-- media_repository_drop_index_wo_method_2. +DELETE FROM background_updates WHERE update_name = 'media_repository_drop_index_wo_method'; + +-- We no longer use refresh tokens, but it's possible that some people +-- might have a background update queued to build this index. Just +-- clear the background update. +DELETE FROM background_updates WHERE update_name = 'refresh_tokens_device_index'; + +DELETE FROM background_updates WHERE update_name = 'user_threepids_grandfather'; + +-- We used to have a background update to turn the GIN index into a +-- GIST one; we no longer do that (obviously) because we actually want +-- a GIN index. However, it's possible that some people might still have +-- the background update queued, so we clear the +-- background update here. +DELETE FROM background_updates WHERE update_name = 'event_search_postgres_gist'; + +-- We no longer need to perform clean-up. +DELETE FROM background_updates WHERE update_name = 'populate_stats_cleanup'; +DELETE FROM background_updates WHERE update_name = 'populate_stats_prepare'; diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 96aaffb53c..af3bab2c15 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -546,6 +546,7 @@ class StateFilter: # the sender of a piece of state wasn't actually in the room, then clearly that # state shouldn't have been returned. # We should at least add some tests around this to see what happens. + # https://github.com/matrix-org/synapse/issues/13006 # if we haven't requested membership events, then it depends on the value of # 'include_others' diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index d8046b7553..6323d452e7 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -19,6 +19,7 @@ from typing import Any, Callable, Dict, Generator, Optional import attr from frozendict import frozendict +from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, task from twisted.internet.defer import Deferred @@ -183,3 +184,8 @@ def log_failure( if not consumeErrors: return failure return None + + +# Version string with git info. Computed here once so that we don't invoke git multiple +# times.
+SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index bc75ddd3e9..54af9089e9 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -19,6 +19,7 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import UserTypes from synapse.api.errors import ( AuthError, @@ -49,7 +50,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = self.auth._auth_blocking + self.auth_blocking = AuthBlocking(hs) self.test_user = "@foo:bar" self.test_token = b"_test_token_" @@ -362,20 +363,22 @@ class AuthTestCase(unittest.HomeserverTestCase): small_number_of_users = 1 # Ensure no error thrown - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) self.auth_blocking._limit_usage_by_mau = True self.store.get_monthly_active_count = simple_async_mock(lots_of_users) - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) # Ensure does not throw an error self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) def test_blocking_mau__depending_on_user_type(self): self.auth_blocking._max_mau_value = 50 @@ -383,15 +386,18 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_monthly_active_count = simple_async_mock(100) # Support users allowed - self.get_success(self.auth.check_auth_blocking(user_type=UserTypes.SUPPORT)) + self.get_success( + self.auth_blocking.check_auth_blocking(user_type=UserTypes.SUPPORT) + ) self.store.get_monthly_active_count = simple_async_mock(100) # Bots not allowed self.get_failure( - self.auth.check_auth_blocking(user_type=UserTypes.BOT), ResourceLimitError + self.auth_blocking.check_auth_blocking(user_type=UserTypes.BOT), + ResourceLimitError, ) self.store.get_monthly_active_count = simple_async_mock(100) # Real users not allowed - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -419,7 +425,7 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service=appservice, authenticated_entity="@appservice:server", ) - self.get_success(self.auth.check_auth_blocking(requester=requester)) + self.get_success(self.auth_blocking.check_auth_blocking(requester=requester)) def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -448,7 +454,8 @@ class AuthTestCase(unittest.HomeserverTestCase): authenticated_entity="@appservice:server", ) self.get_failure( - self.auth.check_auth_blocking(requester=requester), ResourceLimitError + self.auth_blocking.check_auth_blocking(requester=requester), + ResourceLimitError, ) def test_reserved_threepid(self): @@ -459,18 +466,21 @@ class 
AuthTestCase(unittest.HomeserverTestCase): unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.auth_blocking._mau_limits_reserved_threepids = [threepid] - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) self.get_failure( - self.auth.check_auth_blocking(threepid=unknown_threepid), ResourceLimitError + self.auth_blocking.check_auth_blocking(threepid=unknown_threepid), + ResourceLimitError, ) - self.get_success(self.auth.check_auth_blocking(threepid=threepid)) + self.get_success(self.auth_blocking.check_auth_blocking(threepid=threepid)) def test_hs_disabled(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -485,7 +495,9 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -495,4 +507,4 @@ class AuthTestCase(unittest.HomeserverTestCase): user = "@user:server" self.auth_blocking._server_notices_mxid = user self.auth_blocking._hs_disabled_message = "Reason for being disabled" - self.get_success(self.auth.check_auth_blocking(user)) + self.get_success(self.auth_blocking.check_auth_blocking(user)) diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index e63885c1c9..d33e86db4c 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -24,7 +24,7 @@ from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableFederationServlet(BaseFederationServlet): @@ -54,9 +54,7 @@ class CancellableFederationServlet(BaseFederationServlet): return HTTPStatus.OK, {"result": True} -class BaseFederationServletCancellationTests( - unittest.FederatingHomeserverTestCase, EndpointCancellationTestHelperMixin -): +class BaseFederationServletCancellationTests(unittest.FederatingHomeserverTestCase): """Tests for `BaseFederationServlet` cancellation.""" skip = "`BaseFederationServlet` does not support cancellation yet." @@ -86,7 +84,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -106,7 +104,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. 
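An aside on the mocking pattern running through these MAU tests: `simple_async_mock(value)` is Synapse's test helper for stubbing async store methods. With the standard library alone, `unittest.mock.AsyncMock` achieves the same effect; a small runnable sketch (the store and method name merely echo the tests above):

```python
# Emulating the simple_async_mock(...) pattern with the standard library.
import asyncio
from unittest.mock import AsyncMock


class FakeStore:
    pass


store = FakeStore()
# Each await of the mocked method resolves to the canned value.
store.get_monthly_active_count = AsyncMock(return_value=100)


async def check() -> None:
    count = await store.get_monthly_active_count()
    assert count == 100
    store.get_monthly_active_count.assert_awaited_once()


asyncio.run(check())
```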
self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 67a7829769..7106799d44 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -38,7 +38,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # MAU tests # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = hs.get_auth()._auth_blocking + self.auth_blocking = hs.get_auth_blocking() self.auth_blocking._max_mau_value = 50 self.small_number_of_users = 1 diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 01ea7d2a42..b8b465d35b 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -154,7 +154,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): self._record_users() # delete the device - self.get_success(self.handler.delete_device(user1, "abc")) + self.get_success(self.handler.delete_devices(user1, ["abc"])) # check the device was deleted self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError) @@ -179,7 +179,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) # delete the device - self.get_success(self.handler.delete_device(user1, "abc")) + self.get_success(self.handler.delete_devices(user1, ["abc"])) # check that the device_inbox was deleted res = self.get_success( diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index b6ba19c739..23f35d5bf5 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -699,7 +699,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): """ if localpart is None: raise SynapseError(400, "Request must include user id") - await self.hs.get_auth().check_auth_blocking() + await self.hs.get_auth_blocking().check_auth_blocking() need_register = True try: diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 0546655690..aa650756e4 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -178,7 +178,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): result_room_ids.append(result_room["room_id"]) result_children_ids.append( [ - (cs["room_id"], cs["state_key"]) + (result_room["room_id"], cs["state_key"]) for cs in result_room["children_state"] ] ) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index ecd78fa369..05f9ec3c51 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -46,16 +46,9 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.get_success( self.store.db_pool.simple_insert( "background_updates", - {"update_name": "populate_stats_prepare", "progress_json": "{}"}, - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": "populate_stats_prepare", }, ) ) @@ -69,16 +62,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) async def get_all_room_state(self): return await self.store.db_pool.simple_select_list( @@ -533,7 +516,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": 
"populate_stats_prepare", }, ) ) @@ -547,16 +529,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) self.wait_for_background_updates() diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index db3302a4c7..ecc7cc6461 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -45,7 +45,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = self.hs.get_auth()._auth_blocking + self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self): user_id1 = "@user1:test" diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index b9f1a381aa..994d8880b0 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -12,89 +12,543 @@ # See the License for the specific language governing permissions and # limitations under the License. +import inspect +import itertools +import logging from http import HTTPStatus -from typing import Any, Callable, Optional, Union +from typing import ( + Any, + Callable, + ContextManager, + Dict, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, +) from unittest import mock +from unittest.mock import Mock +from twisted.internet.defer import Deferred from twisted.internet.error import ConnectionDone +from twisted.python.failure import Failure +from twisted.test.proto_helpers import MemoryReactorClock +from twisted.web.server import Site from synapse.http.server import ( HTTP_STATUS_REQUEST_CANCELLED, respond_with_html_bytes, respond_with_json, ) +from synapse.http.site import SynapseRequest +from synapse.logging.context import LoggingContext, make_deferred_yieldable from synapse.types import JsonDict -from tests import unittest -from tests.server import FakeChannel, ThreadedMemoryReactorClock +from tests.server import FakeChannel, make_request +from tests.unittest import logcontext_clean +logger = logging.getLogger(__name__) -class EndpointCancellationTestHelperMixin(unittest.TestCase): - """Provides helper methods for testing cancellation of endpoints.""" - def _test_disconnect( - self, - reactor: ThreadedMemoryReactorClock, - channel: FakeChannel, - expect_cancellation: bool, - expected_body: Union[bytes, JsonDict], - expected_code: Optional[int] = None, - ) -> None: - """Disconnects an in-flight request and checks the response. +T = TypeVar("T") - Args: - reactor: The twisted reactor running the request handler. - channel: The `FakeChannel` for the request. - expect_cancellation: `True` if request processing is expected to be - cancelled, `False` if the request should run to completion. - expected_body: The expected response for the request. - expected_code: The expected status code for the request. Defaults to `200` - or `499` depending on `expect_cancellation`. - """ - # Determine the expected status code. 
- if expected_code is None: - if expect_cancellation: - expected_code = HTTP_STATUS_REQUEST_CANCELLED - else: - expected_code = HTTPStatus.OK - - request = channel.request - self.assertFalse( - channel.is_finished(), + +def test_disconnect( + reactor: MemoryReactorClock, + channel: FakeChannel, + expect_cancellation: bool, + expected_body: Union[bytes, JsonDict], + expected_code: Optional[int] = None, +) -> None: + """Disconnects an in-flight request and checks the response. + + Args: + reactor: The twisted reactor running the request handler. + channel: The `FakeChannel` for the request. + expect_cancellation: `True` if request processing is expected to be cancelled, + `False` if the request should run to completion. + expected_body: The expected response for the request. + expected_code: The expected status code for the request. Defaults to `200` or + `499` depending on `expect_cancellation`. + """ + # Determine the expected status code. + if expected_code is None: + if expect_cancellation: + expected_code = HTTP_STATUS_REQUEST_CANCELLED + else: + expected_code = HTTPStatus.OK + + request = channel.request + if channel.is_finished(): + raise AssertionError( "Request finished before we could disconnect - " - "was `await_result=False` passed to `make_request`?", + "ensure `await_result=False` is passed to `make_request`.", ) - # We're about to disconnect the request. This also disconnects the channel, so - # we have to rely on mocks to extract the response. - respond_method: Callable[..., Any] - if isinstance(expected_body, bytes): - respond_method = respond_with_html_bytes + # We're about to disconnect the request. This also disconnects the channel, so we + # have to rely on mocks to extract the response. + respond_method: Callable[..., Any] + if isinstance(expected_body, bytes): + respond_method = respond_with_html_bytes + else: + respond_method = respond_with_json + + with mock.patch( + f"synapse.http.server.{respond_method.__name__}", wraps=respond_method + ) as respond_mock: + # Disconnect the request. + request.connectionLost(reason=ConnectionDone()) + + if expect_cancellation: + # An immediate cancellation is expected. + respond_mock.assert_called_once() else: - respond_method = respond_with_json + respond_mock.assert_not_called() - with mock.patch( - f"synapse.http.server.{respond_method.__name__}", wraps=respond_method - ) as respond_mock: - # Disconnect the request. - request.connectionLost(reason=ConnectionDone()) + # The handler is expected to run to completion. + reactor.advance(1.0) + respond_mock.assert_called_once() - if expect_cancellation: - # An immediate cancellation is expected. - respond_mock.assert_called_once() - args, _kwargs = respond_mock.call_args - code, body = args[1], args[2] - self.assertEqual(code, expected_code) - self.assertEqual(request.code, expected_code) - self.assertEqual(body, expected_body) - else: - respond_mock.assert_not_called() - - # The handler is expected to run to completion. - reactor.pump([1.0]) + args, _kwargs = respond_mock.call_args + code, body = args[1], args[2] + + if code != expected_code: + raise AssertionError( + f"{code} != {expected_code} : " + "Request did not finish with the expected status code." + ) + + if request.code != expected_code: + raise AssertionError( + f"{request.code} != {expected_code} : " + "Request did not finish with the expected status code." + ) + + if body != expected_body: + raise AssertionError( + f"{body!r} != {expected_body!r} : " + "Request did not finish with the expected body."
+ ) + + +@logcontext_clean +def make_request_with_cancellation_test( + test_name: str, + reactor: MemoryReactorClock, + site: Site, + method: str, + path: str, + content: Union[bytes, str, JsonDict] = b"", +) -> FakeChannel: + """Performs a request repeatedly, disconnecting at successive `await`s, until + one completes. + + Fails if: + * A logging context is lost during cancellation. + * A logging context gets restarted after it is marked as finished, eg. if + a request's logging context is used by some processing started by the + request, but the request neglects to cancel that processing or wait for it + to complete. + + Note that "Re-starting finished log context" errors get raised within the + request handling code and may or may not get caught. These errors will + likely manifest as a different logging context error at a later point. When + debugging logging context failures, setting a breakpoint in + `logcontext_error` can prove useful. + * A request gets stuck, possibly due to a previous cancellation. + * The request does not return a 499 when the client disconnects. + This implies that a `CancelledError` was swallowed somewhere. + + It is up to the caller to verify that the request returns the correct data when + it finally runs to completion. + + Note that this function can only cover a single code path and does not guarantee + that an endpoint is compatible with cancellation on every code path. + To allow inspection of the code path that is being tested, this function will + log the stack trace at every `await` that gets cancelled. To view these log + lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment + variable, which will include the log lines in `_trial_temp/test.log`. + Alternatively, `_log_for_request` can be modified to write to `sys.stdout`. + + Args: + test_name: The name of the test, which will be logged. + reactor: The twisted reactor running the request handler. + site: The twisted `Site` to use to render the request. + method: The HTTP request method ("verb"). + path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and + such). + content: The body of the request. + + Returns: + The `FakeChannel` object which stores the result of the final request that + runs to completion. + """ + # To process a request, a coroutine run is created for the async method handling + # the request. That method may then start other coroutine runs, wrapped in + # `Deferred`s. + # + # We would like to trigger a cancellation at the first `await`, re-run the + # request and cancel at the second `await`, and so on. By patching + # `Deferred.__next__`, we can intercept `await`s, track which ones we have or + # have not seen, and force them to block when they wouldn't have. + + # The set of previously seen `await`s. + # Each element is a stringified stack trace. + seen_awaits: Set[Tuple[str, ...]] = set() + + _log_for_request( + 0, f"Running make_request_with_cancellation_test for {test_name}..." + ) + + for request_number in itertools.count(1): + deferred_patch = Deferred__next__Patch(seen_awaits, request_number) + + try: + with mock.patch( + "synapse.http.server.respond_with_json", wraps=respond_with_json + ) as respond_mock: + with deferred_patch.patch(): + # Start the request. + channel = make_request( + reactor, site, method, path, content, await_result=False + ) + request = channel.request + + # Run the request until we see a new `await` which we have not + # yet cancelled at, or it completes.
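The comment above is the crux of the helper: every `await` on a `Deferred` passes through `Deferred.__next__`, so patching that one method lets a test observe each suspension point. A toy version that merely counts awaits instead of forcing them to block (assumes Twisted is installed; the real patch below additionally passes through a `value` argument):

```python
# Counting awaits on Deferreds by patching Deferred.__next__, the same hook
# that Deferred__next__Patch uses to force awaits to block.
from unittest import mock

from twisted.internet.defer import Deferred, ensureDeferred

awaits_seen = 0
original_next = Deferred.__next__


def counting_next(deferred: "Deferred") -> object:
    global awaits_seen
    awaits_seen += 1
    return original_next(deferred)


async def coro() -> None:
    fired: "Deferred[None]" = Deferred()
    fired.callback(None)
    await fired  # already resolved, but still routed through __next__


with mock.patch.object(Deferred, "__next__", new=counting_next):
    ensureDeferred(coro())

print(f"awaits intercepted: {awaits_seen}")  # expect 1
```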
+ while not respond_mock.called and not deferred_patch.new_await_seen: + previous_awaits_seen = deferred_patch.awaits_seen + + reactor.advance(0.0) + + if deferred_patch.awaits_seen == previous_awaits_seen: + # We didn't see any progress. Try advancing the clock. + reactor.advance(1.0) + + if deferred_patch.awaits_seen == previous_awaits_seen: + # We still didn't see any progress. The request might be + # stuck. + raise AssertionError( + "Request appears to be stuck, possibly due to a " + "previous cancelled request" + ) + + if respond_mock.called: + # The request ran to completion and we are done with testing it. + + # `respond_with_json` writes the response asynchronously, so we + # might have to give the reactor a kick before the channel gets + # the response. + deferred_patch.unblock_awaits() + channel.await_result() + + return channel + + # Disconnect the client and wait for the response. + request.connectionLost(reason=ConnectionDone()) + + _log_for_request(request_number, "--- disconnected ---") + + # Advance the reactor just enough to get a response. + # We don't want to advance the reactor too far, because we can only + # detect re-starts of finished logging contexts after we set the + # finished flag below. + for _ in range(2): + # We may need to pump the reactor to allow `delay_cancellation`s to + # finish. + if not respond_mock.called: + reactor.advance(0.0) + + # Try advancing the clock if that didn't work. + if not respond_mock.called: + reactor.advance(1.0) + + # `delay_cancellation`s may be waiting for processing that we've + # forced to block. Try unblocking them, followed by another round of + # pumping the reactor. + if not respond_mock.called: + deferred_patch.unblock_awaits() + + # Mark the request's logging context as finished. If it gets + # activated again, an `AssertionError` will be raised and bubble up + # through request handling code. This `AssertionError` may or may not be + # caught. Eventually some other code will deactivate the logging + # context which will raise a different `AssertionError` because + # resource usage won't have been correctly tracked. + if isinstance(request, SynapseRequest) and request.logcontext: + request.logcontext.finished = True + + # Check that the request finished with a 499, + # ie. the `CancelledError` wasn't swallowed. respond_mock.assert_called_once() - args, _kwargs = respond_mock.call_args - code, body = args[1], args[2] - self.assertEqual(code, expected_code) - self.assertEqual(request.code, expected_code) - self.assertEqual(body, expected_body) + + if request.code != HTTP_STATUS_REQUEST_CANCELLED: + raise AssertionError( + f"{request.code} != {HTTP_STATUS_REQUEST_CANCELLED} : " + "Cancelled request did not finish with the correct status code." + ) + finally: + # Unblock any processing that might be shared between requests, if we + # haven't already done so. + deferred_patch.unblock_awaits() + + assert False, "unreachable" # noqa: B011 + + +class Deferred__next__Patch: + """A `Deferred.__next__` patch that will intercept `await`s and force them + to block once it sees a new `await`. + + When done with the patch, `unblock_awaits()` must be called to clean up after any + `await`s that were forced to block, otherwise processing shared between multiple + requests, such as database queries started by `@cached`, will become permanently + stuck. + + Usage: + seen_awaits = set() + deferred_patch = Deferred__next__Patch(seen_awaits, 1) + try: + with deferred_patch.patch(): + # do things + ... 
+ finally: + deferred_patch.unblock_awaits() + """ + + def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): + """ + Args: + seen_awaits: The set of stack traces of `await`s that have been previously + seen. When the `Deferred.__next__` patch sees a new `await`, it will add + it to the set. + request_number: The request number to log against. + """ + self._request_number = request_number + self._seen_awaits = seen_awaits + + self._original_Deferred___next__ = Deferred.__next__ + + # The number of `await`s on `Deferred`s we have seen so far. + self.awaits_seen = 0 + + # Whether we have seen a new `await` not in `seen_awaits`. + self.new_await_seen = False + + # To force `await`s on resolved `Deferred`s to block, we make up a new + # unresolved `Deferred` and return it out of `Deferred.__next__` / + # `coroutine.send()`. We have to resolve it later, in case the `await`ing + # coroutine is part of some shared processing, such as `@cached`. + self._to_unblock: Dict[Deferred, Union[object, Failure]] = {} + + # The last stack we logged. + self._previous_stack: List[inspect.FrameInfo] = [] + + def patch(self) -> ContextManager[Mock]: + """Returns a context manager which patches `Deferred.__next__`.""" + + def Deferred___next__( + deferred: "Deferred[T]", value: object = None + ) -> "Deferred[T]": + """Intercepts `await`s on `Deferred`s and rigs them to block once we have + seen enough of them. + + `Deferred.__next__` will normally: + * return `self` if the `Deferred` is unresolved, in which case + `coroutine.send()` will return the `Deferred`, and + `_defer.inlineCallbacks` will stop running the coroutine until the + `Deferred` is resolved. + * raise a `StopIteration(result)`, containing the result of the `await`. + * raise another exception, which will come out of the `await`. + """ + self.awaits_seen += 1 + + stack = _get_stack(skip_frames=1) + stack_hash = _hash_stack(stack) + + if stack_hash not in self._seen_awaits: + # Block at the current `await` onwards. + self._seen_awaits.add(stack_hash) + self.new_await_seen = True + + if not self.new_await_seen: + # This `await` isn't interesting. Let it proceed normally. + + # Don't log the stack. It's been seen before in a previous run. + self._previous_stack = stack + + return self._original_Deferred___next__(deferred, value) + + # We want to block at the current `await`. + if deferred.called and not deferred.paused: + # This `Deferred` already has a result. + # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait + # on. This blocks the coroutine that did this `await`. + # We queue it up for unblocking later. + new_deferred: "Deferred[T]" = Deferred() + self._to_unblock[new_deferred] = deferred.result + + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "force-blocked await", + ) + self._previous_stack = stack + + return make_deferred_yieldable(new_deferred) + + # This `Deferred` does not have a result yet. + # The `await` will block normally, so we don't have to do anything. + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "blocking await", + ) + self._previous_stack = stack + + return self._original_Deferred___next__(deferred, value) + + return mock.patch.object(Deferred, "__next__", new=Deferred___next__) + + def unblock_awaits(self) -> None: + """Unblocks any shared processing that we forced to block. 
+ + Must be called when done, otherwise processing shared between multiple requests, + such as database queries started by `@cached`, will become permanently stuck. + """ + to_unblock = self._to_unblock + self._to_unblock = {} + for deferred, result in to_unblock.items(): + deferred.callback(result) + + +def _log_for_request(request_number: int, message: str) -> None: + """Logs a message for an iteration of `make_request_with_cancellation_test`.""" + # We want consistent alignment when logging stack traces, so ensure the logging + # context has a fixed width name. + with LoggingContext(name=f"request-{request_number:<2}"): + logger.info(message) + + +def _log_await_stack( + stack: List[inspect.FrameInfo], + previous_stack: List[inspect.FrameInfo], + request_number: int, + note: str, +) -> None: + """Logs the stack for an `await` in `make_request_with_cancellation_test`. + + Only logs the part of the stack that has changed since the previous call. + + Example output looks like: + ``` + delay_cancellation:750 (synapse/util/async_helpers.py:750) + DatabasePool._runInteraction:768 (synapse/storage/database.py:768) + > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891) + ``` + + Args: + stack: The stack to log, as returned by `_get_stack()`. + previous_stack: The previous stack logged, with callers appearing before + callees. + request_number: The request number to log against. + note: A note to attach to the last stack frame, eg. "blocked on await". + """ + for i, frame_info in enumerate(stack[:-1]): + # Skip any frames in common with the previous logging. + if i < len(previous_stack) and frame_info == previous_stack[i]: + continue + + frame = _format_stack_frame(frame_info) + message = f"{' ' * i}{frame}" + _log_for_request(request_number, message) + + # Always print the final frame with the `await`. + # If the frame with the `await` started another coroutine run, we may have already + # printed a deeper stack which includes our final frame. We want to log where all + # `await`s happen, so we reprint the frame in this case. + i = len(stack) - 1 + frame_info = stack[i] + frame = _format_stack_frame(frame_info) + message = f"{' ' * i}> *{note}* at {frame}" + _log_for_request(request_number, message) + + +def _format_stack_frame(frame_info: inspect.FrameInfo) -> str: + """Returns a string representation of a stack frame. + + Used for debug logging. + + Returns: + A string, formatted like + "JsonResource._async_render:559 (synapse/http/server.py:559)". + """ + method_name = _get_stack_frame_method_name(frame_info) + + return ( + f"{method_name}:{frame_info.lineno} ({frame_info.filename}:{frame_info.lineno})" + ) + + +def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]: + """Captures the stack for a request. + + Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`. + + Used for debug logging. + + Returns: + A list of `inspect.FrameInfo`s, with callers appearing before callees. + """ + stack = [] + + skip_frames += 1 # Also skip `get_stack` itself. + + for frame_info in inspect.stack()[skip_frames:]: + # Skip any twisted `inlineCallbacks` gunk. + if "/twisted/" in frame_info.filename: + continue + + # Exclude the reactor frame, upwards. + method_name = _get_stack_frame_method_name(frame_info) + if method_name == "ThreadedMemoryReactorClock.advance": + break + + stack.append(frame_info) + + # Stop at `JsonResource`'s `wrapped_async_request_handler`, which is the entry + # point for request handling. 
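`_get_stack` is plain `inspect` bookkeeping: walk outward from the current frame, drop library frames, and stop once a known entry point is reached. The same filtering idea in a standalone, runnable form (stopping at a function named `main` rather than `wrapped_async_request_handler`, and filtering `importlib` frames rather than `/twisted/` ones):

```python
# Capturing a trimmed stack with inspect.stack(), as _get_stack does:
# skip unwanted frames and stop at a designated entry point.
import inspect
from typing import List


def capture_stack(skip_frames: int = 0) -> List[str]:
    frames: List[str] = []
    # Skip `capture_stack` itself, plus any frames the caller asked to skip.
    for frame_info in inspect.stack()[skip_frames + 1 :]:
        # Drop frames from library code (stand-in for the /twisted/ filter).
        if "importlib" in frame_info.filename:
            continue
        frames.append(f"{frame_info.function}:{frame_info.lineno}")
        # Stop at the entry point for this sketch.
        if frame_info.function == "main":
            break
    # Callers before callees, matching _get_stack's return order.
    return frames[::-1]


def handler() -> List[str]:
    return capture_stack()


def main() -> None:
    print(handler())


main()
```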
+ if frame_info.function == "wrapped_async_request_handler": + break + + return stack[::-1] + + +def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str: + """Returns the name of a stack frame's method. + + eg. "JsonResource._async_render". + """ + method_name = frame_info.function + + # Prefix the class name for instance methods. + frame_self = frame_info.frame.f_locals.get("self") + if frame_self: + method = getattr(frame_self, method_name, None) + if method: + method_name = method.__qualname__ + else: + # We couldn't find the method on `self`. + # Make something up. It's useful to know which class "contains" a + # function anyway. + method_name = f"{type(frame_self).__name__} {method_name}" + + return method_name + + +def _hash_stack(stack: List[inspect.FrameInfo]): + """Turns a stack into a hashable value that can be put into a set.""" + return tuple(_format_stack_frame(frame) for frame in stack) diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index b3655d7b44..bb966c80c6 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -30,7 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect def make_request(content): @@ -108,9 +108,7 @@ class CancellableRestServlet(RestServlet): return HTTPStatus.OK, {"result": True} -class TestRestServletCancellation( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class TestRestServletCancellation(unittest.HomeserverTestCase): """Tests for `RestServlet` cancellation.""" servlets = [ @@ -120,7 +118,7 @@ class TestRestServletCancellation( def test_cancellable_disconnect(self) -> None: """Test that handlers with the `@cancellable` flag can be cancelled.""" channel = self.make_request("GET", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -130,7 +128,7 @@ class TestRestServletCancellation( def test_uncancellable_disconnect(self) -> None: """Test that handlers without the `@cancellable` flag cannot be cancelled.""" channel = self.make_request("POST", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index a5ab093a27..822a957c3a 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -25,7 +25,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableReplicationEndpoint(ReplicationEndpoint): @@ -69,9 +69,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): return HTTPStatus.OK, {"result": True} -class ReplicationEndpointCancellationTestCase( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class ReplicationEndpointCancellationTestCase(unittest.HomeserverTestCase): """Tests for `ReplicationEndpoint` cancellation.""" def create_test_resource(self): @@ -87,7 +85,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers with the `@cancellable` flag can be cancelled.""" path = f"{REPLICATION_PREFIX}/{CancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, 
await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -98,7 +96,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers without the `@cancellable` flag cannot be cancelled.""" path = f"{REPLICATION_PREFIX}/{UncancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 62e4db23ef..aa84906548 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -728,6 +728,7 @@ class RelationsTestCase(BaseRelationsTestCase): class RelationPaginationTestCase(BaseRelationsTestCase): + @unittest.override_config({"experimental_features": {"msc3715_enabled": True}}) def test_basic_paginate_relations(self) -> None: """Tests that calling the pagination API returns the latest relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index f523d89b8f..35c59ee9e0 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,10 +18,13 @@ """Tests REST events for /rooms paths.""" import json -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Union from unittest.mock import Mock, call from urllib import parse as urlparse +# `Literal` was added in Python 3.8. +from typing_extensions import Literal + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -42,6 +45,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest +from tests.http.server._base import make_request_with_cancellation_test from tests.test_utils import make_awaitable PATH_PREFIX = b"/_matrix/client/api/v1" @@ -471,6 +475,49 @@ class RoomPermissionsTestCase(RoomBase): ) +class RoomStateTestCase(RoomBase): + """Tests /rooms/$room_id/state.""" + + user_id = "@sid1:red" + + def test_get_state_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/state` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + "test_state_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/state" % room_id, + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertCountEqual( + [state_event["type"] for state_event in channel.json_body], + { + "m.room.create", + "m.room.power_levels", + "m.room.join_rules", + "m.room.member", + "m.room.history_visibility", + }, + ) + + def test_get_state_event_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/state/$event_type` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + "test_state_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id), + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(channel.json_body, {"membership": "join"}) + + class RoomsMemberListTestCase(RoomBase): """Tests /rooms/$room_id/members/list REST events.""" @@ -591,6 +638,62 @@ class RoomsMemberListTestCase(RoomBase): channel = self.make_request("GET", room_path) self.assertEqual(200, channel.code, msg=channel.result["body"]) + def
test_get_member_list_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/members` request.""" + room_id = self.helper.create_room_as(self.user_id) + channel = make_request_with_cancellation_test( + "test_get_member_list_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/members" % room_id, + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertLessEqual( + { + "content": {"membership": "join"}, + "room_id": room_id, + "sender": self.user_id, + "state_key": self.user_id, + "type": "m.room.member", + "user_id": self.user_id, + }.items(), + channel.json_body["chunk"][0].items(), + ) + + def test_get_member_list_with_at_token_cancellation(self) -> None: + """Test cancellation of a `/rooms/$room_id/members?at=<sync token>` request.""" + room_id = self.helper.create_room_as(self.user_id) + + # first sync to get an at token + channel = self.make_request("GET", "/sync") + self.assertEqual(200, channel.code) + sync_token = channel.json_body["next_batch"] + + channel = make_request_with_cancellation_test( + "test_get_member_list_with_at_token_cancellation", + self.reactor, + self.site, + "GET", + "/rooms/%s/members?at=%s" % (room_id, sync_token), + ) + + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(len(channel.json_body["chunk"]), 1) + self.assertLessEqual( + { + "content": {"membership": "join"}, + "room_id": room_id, + "sender": self.user_id, + "state_key": self.user_id, + "type": "m.room.member", + "user_id": self.user_id, + }.items(), + channel.json_body["chunk"][0].items(), + ) + class RoomsCreateTestCase(RoomBase): """Tests /rooms and /rooms/$room_id REST events.""" @@ -677,9 +780,11 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. + + In this test, we use the deprecated API in which callbacks return a bool. """ async def user_may_join_room( @@ -701,6 +806,32 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(join_mock.call_count, 0) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly bypassed + when creating a new room. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. 
+ """ + + async def user_may_join_room( + mxid: str, + room_id: str, + is_invite: bool, + ) -> Codes: + return Codes.CONSENT_NOT_GIVEN + + join_mock = Mock(side_effect=user_may_join_room) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + + channel = self.make_request( + "POST", + "/createRoom", + {}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertEqual(join_mock.call_count, 0) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" @@ -911,9 +1042,11 @@ class RoomJoinTestCase(RoomBase): self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called and blocks room joins when needed. + + This test uses the deprecated API, in which callbacks return booleans. """ # Register a dummy callback. Make it allow all room joins for now. @@ -926,6 +1059,8 @@ class RoomJoinTestCase(RoomBase): ) -> bool: return return_value + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) @@ -968,6 +1103,67 @@ class RoomJoinTestCase(RoomBase): return_value = False self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly called + and blocks room joins when needed. + + This test uses the latest API to this day, in which callbacks return `NOT_SPAM` or `Codes`. + """ + + # Register a dummy callback. Make it allow all room joins for now. + return_value: Union[Literal["NOT_SPAM"], Codes] = synapse.module_api.NOT_SPAM + + async def user_may_join_room( + userid: str, + room_id: str, + is_invited: bool, + ) -> Union[Literal["NOT_SPAM"], Codes]: + return return_value + + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + + # Join a first room, without being invited to it. + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room1, + False, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Join a second room, this time with an invite for it. + self.helper.invite(self.room2, self.user1, self.user2, tok=self.tok1) + self.helper.join(self.room2, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room2, + True, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Now make the callback deny all room joins, and check that a join actually fails. 
+ return_value = Codes.CONSENT_NOT_GIVEN + self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + class RoomJoinRatelimitTestCase(RoomBase): user_id = "@sid1:red" @@ -2845,9 +3041,14 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_threepid_invite_spamcheck(self) -> None: + def test_threepid_invite_spamcheck_deprecated(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. + + In this test, we use the deprecated API in which callbacks return a bool. + """ # Mock a few functions to prevent the test from failing due to failing to talk to - # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock @@ -2901,3 +3102,67 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() + + def test_threepid_invite_spamcheck(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.""" + # Mock a few functions to prevent the test from failing due to failing to talk to + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we + # can check its call_count later on during the test. + make_invite_mock = Mock(return_value=make_awaitable(0)) + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock + self.hs.get_identity_handler().lookup_3pid = Mock( + return_value=make_awaitable(None), + ) + + # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it + # allow everything for now. + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + mock = Mock( + return_value=make_awaitable(synapse.module_api.NOT_SPAM), + spec=lambda *x: None, + ) + self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + + # Send a 3PID invite into the room and check that it succeeded. + email_to_invite = "teresa@example.com" + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Check that the callback was called with the right params. + mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) + + # Check that the call to send the invite was made. + make_invite_mock.assert_called_once() + + # Now change the return value of the callback to deny any invite and test that + # we can't send the invite. + mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 403) + + # Also check that it stopped before calling _make_and_store_3pid_invite. 
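Stepping back from the individual tests: the contract change driving all of these `_deprecated` splits is that spam-checker callbacks now return the literal string `"NOT_SPAM"` or an error `Codes` member instead of a bare bool. A minimal sketch of why the richer return type helps (this `Codes` enum is a two-member stand-in for Synapse's, and `apply_callback` is invented for illustration):

```python
# Sketch of the bool -> Union[Literal["NOT_SPAM"], Codes] callback migration.
# Requires Python 3.8+ for typing.Literal; the diff above imports it from
# typing_extensions instead, to support older interpreters.
from enum import Enum
from typing import Literal, Union


class Codes(str, Enum):  # two-member stand-in for synapse.api.errors.Codes
    FORBIDDEN = "M_FORBIDDEN"
    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"


NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM"


def apply_callback(result: Union[Literal["NOT_SPAM"], Codes]) -> None:
    # Invented helper: unlike a bare False, a Codes value tells the caller
    # *why* the action was refused, so the client sees a real errcode.
    if result != NOT_SPAM:
        assert isinstance(result, Codes)
        raise PermissionError(f"refused with errcode {result.value}")


apply_callback(NOT_SPAM)  # allowed
try:
    apply_callback(Codes.CONSENT_NOT_GIVEN)
except PermissionError as err:
    print(err)  # refused with errcode M_CONSENT_NOT_GIVEN
```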
+ make_invite_mock.assert_called_once() diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 07e29788e5..e07ae78fc4 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -96,7 +96,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) @@ -112,7 +114,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -132,7 +134,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, but should have one """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -145,7 +147,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -156,7 +160,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(None) ) @@ -170,7 +176,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -185,7 +191,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -202,7 +208,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. 
""" - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 8370a27195..78b83d97b6 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -13,7 +13,17 @@ # limitations under the License. import itertools -from typing import List +from typing import ( + Collection, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + TypeVar, +) import attr @@ -22,13 +32,13 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.event_auth import auth_types_for_event -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.state.v2 import ( _get_auth_chain_difference, lexicographical_topological_sort, resolve_events_with_store, ) -from synapse.types import EventID +from synapse.types import EventID, StateMap from tests import unittest @@ -48,7 +58,7 @@ ORIGIN_SERVER_TS = 0 class FakeClock: - def sleep(self, msec): + def sleep(self, msec: float) -> "defer.Deferred[None]": return defer.succeed(None) @@ -60,7 +70,14 @@ class FakeEvent: as domain. """ - def __init__(self, id, sender, type, state_key, content): + def __init__( + self, + id: str, + sender: str, + type: str, + state_key: Optional[str], + content: Mapping[str, object], + ): self.node_id = id self.event_id = EventID(id, "example.com").to_string() self.sender = sender @@ -69,12 +86,12 @@ class FakeEvent: self.content = content self.room_id = ROOM_ID - def to_event(self, auth_events, prev_events): + def to_event(self, auth_events: List[str], prev_events: List[str]) -> EventBase: """Given the auth_events and prev_events, convert to a Frozen Event Args: - auth_events (list[str]): list of event_ids - prev_events (list[str]): list of event_ids + auth_events: list of event_ids + prev_events: list of event_ids Returns: FrozenEvent @@ -164,7 +181,7 @@ INITIAL_EDGES = ["START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] class StateTestCase(unittest.TestCase): - def test_ban_vs_pl(self): + def test_ban_vs_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -202,7 +219,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_join_rule_evasion(self): + def test_join_rule_evasion(self) -> None: events = [ FakeEvent( id="JR", @@ -226,7 +243,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_offtopic_pl(self): + def test_offtopic_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -257,7 +274,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_basic(self): + def test_topic_basic(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -297,7 +314,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_reset(self): + def test_topic_reset(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -327,7 +344,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic(self): + def test_topic(self) -> None: events = [ FakeEvent( id="T1", 
sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -380,7 +397,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_mainline_sort(self): + def test_mainline_sort(self) -> None: """Tests that the mainline ordering works correctly.""" events = [ @@ -434,22 +451,26 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def do_check(self, events, edges, expected_state_ids): + def do_check( + self, + events: List[FakeEvent], + edges: List[List[str]], + expected_state_ids: List[str], + ) -> None: """Take a list of events and edges and calculate the state of the graph at END, and asserts it matches `expected_state_ids` Args: - events (list[FakeEvent]) - edges (list[list[str]]): A list of chains of event edges, e.g. + events + edges: A list of chains of event edges, e.g. `[[A, B, C]]` are edges A->B and B->C. - expected_state_ids (list[str]): The expected state at END, (excluding + expected_state_ids: The expected state at END, (excluding the keys that haven't changed since START). """ # We want to sort the events into topological order for processing. - graph = {} + graph: Dict[str, Set[str]] = {} - # node_id -> FakeEvent - fake_event_map = {} + fake_event_map: Dict[str, FakeEvent] = {} for ev in itertools.chain(INITIAL_EVENTS, events): graph[ev.node_id] = set() @@ -462,10 +483,8 @@ class StateTestCase(unittest.TestCase): for a, b in pairwise(edge_list): graph[a].add(b) - # event_id -> FrozenEvent - event_map = {} - # node_id -> state - state_at_event = {} + event_map: Dict[str, EventBase] = {} + state_at_event: Dict[str, StateMap[str]] = {} # We copy the map as the sort consumes the graph graph_copy = {k: set(v) for k, v in graph.items()} @@ -496,7 +515,16 @@ class StateTestCase(unittest.TestCase): if fake_event.state_key is not None: state_after[(fake_event.type, fake_event.state_key)] = event_id - auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) + # This type ignore is a bit sad. Things we have tried: + # 1. Define a `GenericEvent` Protocol satisfied by FakeEvent, EventBase and + # EventBuilder. But this is Hard because the relevant attributes are + # DictProperty[T] descriptors on EventBase but normal Ts on FakeEvent. + # 2. Define a `GenericEvent` Protocol describing `FakeEvent` only, and + # change this function to accept Union[Event, EventBase, EventBuilder]. + # This seems reasonable to me, but mypy isn't happy. I think that's + # a mypy bug, see https://github.com/python/mypy/issues/5570 + # Instead, resort to a type-ignore. + auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) # type: ignore[arg-type] auth_events = [] for key in auth_types: @@ -530,8 +558,14 @@ class StateTestCase(unittest.TestCase): class LexicographicalTestCase(unittest.TestCase): - def test_simple(self): - graph = {"l": {"o"}, "m": {"n", "o"}, "n": {"o"}, "o": set(), "p": {"o"}} + def test_simple(self) -> None: + graph: Dict[str, Set[str]] = { + "l": {"o"}, + "m": {"n", "o"}, + "n": {"o"}, + "o": set(), + "p": {"o"}, + } res = list(lexicographical_topological_sort(graph, key=lambda x: x)) @@ -539,7 +573,7 @@ class LexicographicalTestCase(unittest.TestCase): class SimpleParamStateTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: # We build up a simple DAG. 
event_map = {} @@ -627,7 +661,7 @@ class SimpleParamStateTestCase(unittest.TestCase): ] } - def test_event_map_none(self): + def test_event_map_none(self) -> None: # Test that we correctly handle passing `None` as the event_map state_d = resolve_events_with_store( @@ -649,7 +683,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): events. """ - def test_simple(self): + def test_simple(self) -> None: # Test getting the auth difference for a simple chain with a single # unpersisted event: # @@ -695,7 +729,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {c.event_id}) - def test_multiple_unpersisted_chain(self): + def test_multiple_unpersisted_chain(self) -> None: # Test getting the auth difference for a simple chain with multiple # unpersisted events: # @@ -752,7 +786,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, c.event_id}) - def test_unpersisted_events_different_sets(self): + def test_unpersisted_events_different_sets(self) -> None: # Test getting the auth difference with multiple unpersisted events # in different branches: # @@ -820,7 +854,10 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, e.event_id}) -def pairwise(iterable): +T = TypeVar("T") + + +def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) @@ -829,24 +866,26 @@ def pairwise(iterable): @attr.s class TestStateResolutionStore: - event_map = attr.ib() + event_map: Dict[str, EventBase] = attr.ib() - def get_events(self, event_ids, allow_rejected=False): + def get_events( + self, event_ids: Collection[str], allow_rejected: bool = False + ) -> "defer.Deferred[Dict[str, EventBase]]": """Get events from the database Args: - event_ids (list): The event_ids of the events to fetch - allow_rejected (bool): If True return rejected events. + event_ids: The event_ids of the events to fetch + allow_rejected: If True return rejected events. Returns: - Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + Dict from event_id to event. """ return defer.succeed( {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} ) - def _get_auth_chain(self, event_ids: List[str]) -> List[str]: + def _get_auth_chain(self, event_ids: Iterable[str]) -> List[str]: """Gets the full auth chain for a set of events (including rejected events).
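As an aside for readers of this diff: the newly annotated `pairwise` helper above is what `do_check` uses to turn each edge chain into individual graph edges. The following is a minimal, self-contained sketch of that behaviour (not part of the patch; the three-node chain is a made-up example):

```python
import itertools
from typing import Dict, Iterable, Set, Tuple, TypeVar

T = TypeVar("T")


def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]:
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # tee() yields two independent iterators over the input; advancing the
    # second by one element and zipping produces each consecutive pair.
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)


# As in do_check(): the chain ["A", "B", "C"] contributes edges A->B and B->C.
graph: Dict[str, Set[str]] = {"A": set(), "B": set(), "C": set()}
for x, y in pairwise(["A", "B", "C"]):
    graph[x].add(y)

assert graph == {"A": {"B"}, "B": {"C"}, "C": set()}
```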
@@ -880,7 +919,9 @@ class TestStateResolutionStore: return list(result) - def get_auth_chain_difference(self, room_id, auth_sets): + def get_auth_chain_difference( + self, room_id: str, auth_sets: List[Set[str]] + ) -> "defer.Deferred[Set[str]]": chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] common = set(chains[0]).intersection(*chains[1:]) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e2c506e5a4..229ecd84a6 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -15,10 +15,12 @@ import unittest from typing import Optional +from parameterized import parameterized + from synapse import event_auth from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.types import JsonDict, get_domain_from_id @@ -30,38 +32,39 @@ class EventAuthTestCase(unittest.TestCase): """ creator = "@creator:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V9, creator), + _join_event(RoomVersions.V9, creator), ] # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... but a rejected join_rules event should cause it to be rejected - rejected_join_rules = _join_rules_event(creator, "public") + rejected_join_rules = _join_rules_event( + RoomVersions.V9, + creator, + "public", + ) rejected_join_rules.rejected_reason = "stinky" auth_events.append(rejected_join_rules) self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... 
even if there is *also* a good join rules - auth_events.append(_join_rules_event(creator, "public")) + auth_events.append(_join_rules_event(RoomVersions.V9, creator, "public")) self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -73,15 +76,14 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" joiner = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), - _join_event(joiner), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, joiner), ] # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _random_state_event(creator), + _random_state_event(RoomVersions.V1, creator), auth_events, ) @@ -89,8 +91,7 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, - _random_state_event(joiner), + _random_state_event(RoomVersions.V1, joiner), auth_events, ) @@ -104,28 +105,28 @@ class EventAuthTestCase(unittest.TestCase): king = "@joiner2:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "29", king: "30"}} + RoomVersions.V1, + creator, + {"state_default": "30", "users": {pleb: "29", king: "30"}}, ), - _join_event(pleb), - _join_event(king), + _join_event(RoomVersions.V1, pleb), + _join_event(RoomVersions.V1, king), ] # pleb should not be able to send state self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, - _random_state_event(pleb), + _random_state_event(RoomVersions.V1, pleb), auth_events, ), # king should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _random_state_event(king), + _random_state_event(RoomVersions.V1, king), auth_events, ) @@ -134,37 +135,33 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator), + _alias_event(RoomVersions.V1, creator), auth_events, ) # Reject an event with no state key. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V1, creator, state_key=""), auth_events, ) # If the domain of the sender does not match the state key, reject. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V1, creator, state_key="test.com"), auth_events, ) # Note that the member does *not* need to be in the room. 
event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _alias_event(other), + _alias_event(RoomVersions.V1, other), auth_events, ) @@ -173,38 +170,35 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V6, creator), + _join_event(RoomVersions.V6, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator), + _alias_event(RoomVersions.V6, creator), auth_events, ) # No particular checks are done on the state key. event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V6, creator, state_key=""), auth_events, ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V6, creator, state_key="test.com"), auth_events, ) # Per standard auth rules, the member must be in the room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _alias_event(other), + _alias_event(RoomVersions.V6, other), auth_events, ) - def test_msc2209(self): + @parameterized.expand([(RoomVersions.V1, True), (RoomVersions.V6, False)]) + def test_notifications(self, room_version: RoomVersion, allow_modification: bool): """ Notifications power levels get checked due to MSC2209. """ @@ -212,28 +206,26 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(room_version, creator), + _join_event(room_version, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "30"}} + room_version, creator, {"state_default": "30", "users": {pleb: "30"}} ), - _join_event(pleb), + _join_event(room_version, pleb), ] - # pleb should be able to modify the notifications power level. - event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, + pl_event = _power_levels_event( + room_version, pleb, {"notifications": {"room": 100}} ) - # But an MSC2209 room rejects this change. - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, - ) + # on room V1, pleb should be able to modify the notifications power level. + if allow_modification: + event_auth.check_auth_rules_for_event(pl_event, auth_events) + + else: + # But an MSC2209 room rejects this change. + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event(pl_event, auth_events) def test_join_rules_public(self): """ @@ -243,58 +235,60 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "public"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "public" + ), } # Check join. event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -306,64 +300,88 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "invite"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "invite" + ), } # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. 
- auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) - def test_join_rules_msc3083_restricted(self): + def test_join_rules_restricted_old_room(self) -> None: + """Old room versions should reject joins to restricted rooms""" + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V6, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "restricted" + ), + } + + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event( + _join_event(RoomVersions.V6, pleb), + auth_events.values(), + ) + + def test_join_rules_msc3083_restricted(self) -> None: """ Test joining a restricted room from MSC3083. @@ -377,29 +395,25 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.power_levels", ""): _power_levels_event(creator, {"invite": 0}), - ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"), + ("m.room.create", ""): _create_event(RoomVersions.V8, creator), + ("m.room.member", creator): _join_event(RoomVersions.V8, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V8, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V8, creator, "restricted" + ), } - # Older room versions don't understand this join rule - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), - auth_events.values(), - ) - # A properly formatted join event should work. authorised_join_event = _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) @@ -408,14 +422,16 @@ class EventAuthTestCase(unittest.TestCase): # are done properly). pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@inviter:foo.test": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@inviter:foo.test": 150}}, ) pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( - "@inviter:foo.test" + RoomVersions.V8, "@inviter:foo.test" ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@inviter:foo.test" @@ -427,20 +443,21 @@ class EventAuthTestCase(unittest.TestCase): # A join which is missing an authorised server is rejected. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # A join authorised by a user who is not in the room is rejected. pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@other:example.com": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@other:example.com": 150}}, ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@other:example.com" } @@ -453,8 +470,8 @@ # *would* be valid, but is sent by a different user.) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _member_event( + RoomVersions.V8, pleb, "join", sender=creator, @@ -466,39 +483,41 @@ ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "leave" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user can send a join if they're in the room. (This doesn't need to # be authorised since the user is already joined.) - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "join" + ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # A user can accept an invite. (This doesn't need to be authorised since # the user was invited.)
auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V8, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -508,20 +527,25 @@ class EventAuthTestCase(unittest.TestCase): TEST_ROOM_ID = "!test:room" -def _create_event(user_id: str) -> EventBase: +def _create_event( + room_version: RoomVersion, + user_id: str, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.create", "state_key": "", "sender": user_id, "content": {"creator": user_id}, - } + }, + room_version=room_version, ) def _member_event( + room_version: RoomVersion, user_id: str, membership: str, sender: Optional[str] = None, @@ -530,79 +554,102 @@ def _member_event( return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.member", "sender": sender or user_id, "state_key": user_id, "content": {"membership": membership, **(additional_content or {})}, "prev_events": [], - } + }, + room_version=room_version, ) -def _join_event(user_id: str, additional_content: Optional[dict] = None) -> EventBase: - return _member_event(user_id, "join", additional_content=additional_content) +def _join_event( + room_version: RoomVersion, + user_id: str, + additional_content: Optional[dict] = None, +) -> EventBase: + return _member_event( + room_version, + user_id, + "join", + additional_content=additional_content, + ) -def _power_levels_event(sender: str, content: JsonDict) -> EventBase: +def _power_levels_event( + room_version: RoomVersion, + sender: str, + content: JsonDict, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.power_levels", "sender": sender, "state_key": "", "content": content, - } + }, + room_version=room_version, ) -def _alias_event(sender: str, **kwargs) -> EventBase: +def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: data = { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.aliases", "sender": sender, "state_key": get_domain_from_id(sender), "content": {"aliases": []}, } data.update(**kwargs) - return make_event_from_dict(data) + return make_event_from_dict(data, room_version=room_version) -def _random_state_event(sender: str) -> EventBase: +def _random_state_event(room_version: RoomVersion, sender: str) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "test.state", "sender": sender, "state_key": "", "content": {"membership": "join"}, - } + }, + room_version=room_version, ) -def _join_rules_event(sender: str, join_rule: str) -> EventBase: +def _join_rules_event( + room_version: RoomVersion, sender: str, join_rule: str +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.join_rules", "sender": sender, "state_key": "", "content": { "join_rule": join_rule, }, - } + }, + room_version=room_version, ) event_count = 0 -def _get_event_id() -> str: +def 
_maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict: + """If this room version needs it, generate an event id""" + if room_version.event_format != EventFormatVersions.V1: + return {} + global event_count c = event_count event_count += 1 - return "!%i:example.com" % (c,) + return {"event_id": "!%i:example.com" % (c,)} diff --git a/tests/test_server.py b/tests/test_server.py index 0f1eb43cbc..847432f791 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -34,7 +34,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect from tests.server import ( FakeSite, ThreadedMemoryReactorClock, @@ -407,7 +407,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource): return HTTPStatus.OK, b"ok" -class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMixin): +class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" def setUp(self): @@ -421,7 +421,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "GET", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -433,7 +433,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "POST", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, @@ -441,7 +441,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix ) -class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMixin): +class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` cancellation.""" def setUp(self): @@ -455,7 +455,7 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "GET", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -467,6 +467,6 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix channel = make_request( self.reactor, self.site, "POST", "/sleep", await_result=False ) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, expected_body=b"ok" )
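A closing aside on the `_maybe_get_event_id_dict_for_room_version` helper that replaces `_get_event_id` above: only the v1 event format (used by room versions 1 and 2) carries an explicit `event_id` field in the event dict; later formats derive the event ID from the event itself, so the helper returns an empty dict and no `event_id` key is passed to `make_event_from_dict`. The following is a hedged usage sketch, not part of the patch, assuming a Synapse checkout on the path and a fresh `event_count`:

```python
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions

event_count = 0


def _maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict:
    """If this room version needs it, generate an event id"""
    if room_version.event_format != EventFormatVersions.V1:
        # v3+ event formats compute the event id from the event itself,
        # so no explicit event_id key should be supplied.
        return {}
    global event_count
    c = event_count
    event_count += 1
    return {"event_id": "!%i:example.com" % (c,)}


# Room v1 events must carry an explicit id; room v6 events must not.
assert _maybe_get_event_id_dict_for_room_version(RoomVersions.V1) == {
    "event_id": "!0:example.com"
}
assert _maybe_get_event_id_dict_for_room_version(RoomVersions.V6) == {}
```

This is why the test helpers in the diff splat the helper's result into each event dict (`**_maybe_get_event_id_dict_for_room_version(room_version)`) rather than always setting `"event_id"`.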