From 6d282a9c89ae9fb55fe7ccc8d0ab16bf18b206ec Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Mar 2022 14:28:18 +0000 Subject: Make release script write correct no-op changelog (#12127) As we want to include the previous version in the "No new changes..." string. --- changelog.d/12127.misc | 1 + scripts-dev/release.py | 30 ++++++++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12127.misc diff --git a/changelog.d/12127.misc b/changelog.d/12127.misc new file mode 100644 index 0000000000..e42eca63a8 --- /dev/null +++ b/changelog.d/12127.misc @@ -0,0 +1 @@ +Update release script to insert the previous version when writing "No significant changes" line in the changelog. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 4e1f99fee4..046453e65f 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -17,6 +17,8 @@ """An interactive script for doing a release. See `cli()` below. """ +import glob +import os import re import subprocess import sys @@ -209,8 +211,8 @@ def prepare(): with open("synapse/__init__.py", "w") as f: f.write(parsed_synapse_ast.dumps()) - # Generate changelogs - run_until_successful("python3 -m towncrier", shell=True) + # Generate changelogs. + generate_and_write_changelog(current_version) # Generate debian changelogs if parsed_new_version.pre is not None: @@ -523,5 +525,29 @@ def get_changes_for_version(wanted_version: version.Version) -> str: return "\n".join(version_changelog) +def generate_and_write_changelog(current_version: version.Version): + # We do this by getting a draft so that we can edit it before writing to the + # changelog. + result = run_until_successful( + "python3 -m towncrier --draft", shell=True, capture_output=True + ) + new_changes = result.stdout.decode("utf-8") + new_changes = new_changes.replace( + "No significant changes.", f"No significant changes since {current_version}." 
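+        # e.g. towncrier's bare "No significant changes." becomes
+        # "No significant changes since 1.53.0." (version shown is illustrative)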
+ ) + + # Prepend changes to changelog + with open("CHANGES.md", "r+") as f: + existing_content = f.read() + f.seek(0, 0) + f.write(new_changes) + f.write("\n") + f.write(existing_content) + + # Remove all the news fragments + for f in glob.iglob("changelog.d/*.*"): + os.remove(f) + + if __name__ == "__main__": cli() -- cgit 1.5.1 From 1fbe0316a991e77289d4577b16ff3fcd27c26dc8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 18:00:26 +0000 Subject: Add suffices to scripts in scripts-dev (#12137) * Rename scripts-dev to have suffices * Update references to `scripts-dev` * Changelog * These scripts don't pass mypy --- .github/workflows/release-artifacts.yml | 4 +- .github/workflows/tests.yml | 4 +- changelog.d/12137.misc | 1 + docs/code_style.md | 2 +- mypy.ini | 12 +- scripts-dev/build_debian_packages | 217 -------------------------------- scripts-dev/build_debian_packages.py | 217 ++++++++++++++++++++++++++++++++ scripts-dev/check-newsfragment | 62 --------- scripts-dev/check-newsfragment.sh | 62 +++++++++ scripts-dev/generate_sample_config | 28 ----- scripts-dev/generate_sample_config.sh | 28 +++++ scripts-dev/lint.sh | 2 - scripts-dev/sign_json | 166 ------------------------ scripts-dev/sign_json.py | 166 ++++++++++++++++++++++++ tox.ini | 2 - 15 files changed, 490 insertions(+), 483 deletions(-) create mode 100644 changelog.d/12137.misc delete mode 100755 scripts-dev/build_debian_packages create mode 100755 scripts-dev/build_debian_packages.py delete mode 100755 scripts-dev/check-newsfragment create mode 100755 scripts-dev/check-newsfragment.sh delete mode 100755 scripts-dev/generate_sample_config create mode 100755 scripts-dev/generate_sample_config.sh delete mode 100755 scripts-dev/sign_json create mode 100755 scripts-dev/sign_json.py diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index eee3633d50..65ea761ad7 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -31,7 +31,7 @@ jobs: # if we're running from a tag, get the full list of distros; otherwise just use debian:sid dists='["debian:sid"]' if [[ $GITHUB_REF == refs/tags/* ]]; then - dists=$(scripts-dev/build_debian_packages --show-dists-json) + dists=$(scripts-dev/build_debian_packages.py --show-dists-json) fi echo "::set-output name=distros::$dists" # map the step outputs to job outputs @@ -74,7 +74,7 @@ jobs: # see https://github.com/docker/build-push-action/issues/252 # for the cache magic here run: | - ./src/scripts-dev/build_debian_packages \ + ./src/scripts-dev/build_debian_packages.py \ --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \ --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \ --docker-build-arg=--progress=plain \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e9e4277322..3f4e44ca59 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - run: pip install -e . 
- - run: scripts-dev/generate_sample_config --check + - run: scripts-dev/generate_sample_config.sh --check lint: runs-on: ubuntu-latest @@ -51,7 +51,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 - run: "pip install 'towncrier>=18.6.0rc1'" - - run: scripts-dev/check-newsfragment + - run: scripts-dev/check-newsfragment.sh env: PULL_REQUEST_NUMBER: ${{ github.event.number }} diff --git a/changelog.d/12137.misc b/changelog.d/12137.misc new file mode 100644 index 0000000000..118ff77a91 --- /dev/null +++ b/changelog.d/12137.misc @@ -0,0 +1 @@ +Give `scripts-dev` scripts suffixes for neater CI config. \ No newline at end of file diff --git a/docs/code_style.md b/docs/code_style.md index 4d8e7c973d..e7c9cd1a5e 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -172,6 +172,6 @@ frobber: ``` Note that the sample configuration is generated from the synapse code -and is maintained by a script, `scripts-dev/generate_sample_config`. +and is maintained by a script, `scripts-dev/generate_sample_config.sh`. Making sure that the output from this script matches the desired format is left as an exercise for the reader! diff --git a/mypy.ini b/mypy.ini index 23ca4eaa5a..10971b7225 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,7 +11,7 @@ local_partial_types = True no_implicit_optional = True files = - scripts-dev/sign_json, + scripts-dev/, setup.py, synapse/, tests/ @@ -23,10 +23,20 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |scripts-dev/build_debian_packages.py + |scripts-dev/check_signature.py + |scripts-dev/definitions.py + |scripts-dev/federation_client.py + |scripts-dev/hash_history.py + |scripts-dev/list_url_patterns.py + |scripts-dev/release.py + |scripts-dev/tail-synapse.py + |synapse/_scripts/export_signing_key.py |synapse/_scripts/move_remote_media_to_new_store.py |synapse/_scripts/synapse_port_db.py |synapse/_scripts/update_synapse_database.py + |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages deleted file mode 100755 index 7ff96a1ee6..0000000000 --- a/scripts-dev/build_debian_packages +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python3 - -# Build the Debian packages using Docker images. -# -# This script builds the Docker images and then executes them sequentially, each -# one building a Debian package for the targeted operating system. It is -# designed to be a "single command" to produce all the images. -# -# By default, builds for all known distributions, but a list of distributions -# can be passed on the commandline for debugging. - -import argparse -import json -import os -import signal -import subprocess -import sys -import threading -from concurrent.futures import ThreadPoolExecutor -from typing import Optional, Sequence - -DISTS = ( - "debian:buster", # oldstable: EOL 2022-08 - "debian:bullseye", - "debian:bookworm", - "debian:sid", - "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:impish", # 21.10 (EOL 2022-07) -) - -DESC = """\ -Builds .debs for synapse, using a Docker image for the build environment. - -By default, builds for all known distributions, but a list of distributions -can be passed on the commandline for debugging. 
-""" - -projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - - -class Builder(object): - def __init__( - self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None - ): - self.redirect_stdout = redirect_stdout - self._docker_build_args = tuple(docker_build_args or ()) - self.active_containers = set() - self._lock = threading.Lock() - self._failed = False - - def run_build(self, dist, skip_tests=False): - """Build deb for a single distribution""" - - if self._failed: - print("not building %s due to earlier failure" % (dist,)) - raise Exception("failed") - - try: - self._inner_build(dist, skip_tests) - except Exception as e: - print("build of %s failed: %s" % (dist, e), file=sys.stderr) - self._failed = True - raise - - def _inner_build(self, dist, skip_tests=False): - tag = dist.split(":", 1)[1] - - # Make the dir where the debs will live. - # - # Note that we deliberately put this outside the source tree, otherwise - # we tend to get source packages which are full of debs. (We could hack - # around that with more magic in the build_debian.sh script, but that - # doesn't solve the problem for natively-run dpkg-buildpakage). - debsdir = os.path.join(projdir, "../debs") - os.makedirs(debsdir, exist_ok=True) - - if self.redirect_stdout: - logfile = os.path.join(debsdir, "%s.buildlog" % (tag,)) - print("building %s: directing output to %s" % (dist, logfile)) - stdout = open(logfile, "w") - else: - stdout = None - - # first build a docker image for the build environment - build_args = ( - ( - "docker", - "build", - "--tag", - "dh-venv-builder:" + tag, - "--build-arg", - "distro=" + dist, - "-f", - "docker/Dockerfile-dhvirtualenv", - ) - + self._docker_build_args - + ("docker",) - ) - - subprocess.check_call( - build_args, - stdout=stdout, - stderr=subprocess.STDOUT, - cwd=projdir, - ) - - container_name = "synapse_build_" + tag - with self._lock: - self.active_containers.add(container_name) - - # then run the build itself - subprocess.check_call( - [ - "docker", - "run", - "--rm", - "--name", - container_name, - "--volume=" + projdir + ":/synapse/source:ro", - "--volume=" + debsdir + ":/debs", - "-e", - "TARGET_USERID=%i" % (os.getuid(),), - "-e", - "TARGET_GROUPID=%i" % (os.getgid(),), - "-e", - "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), - "dh-venv-builder:" + tag, - ], - stdout=stdout, - stderr=subprocess.STDOUT, - ) - - with self._lock: - self.active_containers.remove(container_name) - - if stdout is not None: - stdout.close() - print("Completed build of %s" % (dist,)) - - def kill_containers(self): - with self._lock: - active = list(self.active_containers) - - for c in active: - print("killing container %s" % (c,)) - subprocess.run( - [ - "docker", - "kill", - c, - ], - stdout=subprocess.DEVNULL, - ) - with self._lock: - self.active_containers.remove(c) - - -def run_builds(builder, dists, jobs=1, skip_tests=False): - def sig(signum, _frame): - print("Caught SIGINT") - builder.kill_containers() - - signal.signal(signal.SIGINT, sig) - - with ThreadPoolExecutor(max_workers=jobs) as e: - res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists) - - # make sure we consume the iterable so that exceptions are raised. 
- for _ in res: - pass - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=DESC, - ) - parser.add_argument( - "-j", - "--jobs", - type=int, - default=1, - help="specify the number of builds to run in parallel", - ) - parser.add_argument( - "--no-check", - action="store_true", - help="skip running tests after building", - ) - parser.add_argument( - "--docker-build-arg", - action="append", - help="specify an argument to pass to docker build", - ) - parser.add_argument( - "--show-dists-json", - action="store_true", - help="instead of building the packages, just list the dists to build for, as a json array", - ) - parser.add_argument( - "dist", - nargs="*", - default=DISTS, - help="a list of distributions to build for. Default: %(default)s", - ) - args = parser.parse_args() - if args.show_dists_json: - print(json.dumps(DISTS)) - else: - builder = Builder( - redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg - ) - run_builds( - builder, - dists=args.dist, - jobs=args.jobs, - skip_tests=args.no_check, - ) diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py new file mode 100755 index 0000000000..7ff96a1ee6 --- /dev/null +++ b/scripts-dev/build_debian_packages.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 + +# Build the Debian packages using Docker images. +# +# This script builds the Docker images and then executes them sequentially, each +# one building a Debian package for the targeted operating system. It is +# designed to be a "single command" to produce all the images. +# +# By default, builds for all known distributions, but a list of distributions +# can be passed on the commandline for debugging. + +import argparse +import json +import os +import signal +import subprocess +import sys +import threading +from concurrent.futures import ThreadPoolExecutor +from typing import Optional, Sequence + +DISTS = ( + "debian:buster", # oldstable: EOL 2022-08 + "debian:bullseye", + "debian:bookworm", + "debian:sid", + "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) + "ubuntu:impish", # 21.10 (EOL 2022-07) +) + +DESC = """\ +Builds .debs for synapse, using a Docker image for the build environment. + +By default, builds for all known distributions, but a list of distributions +can be passed on the commandline for debugging. +""" + +projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + +class Builder(object): + def __init__( + self, redirect_stdout=False, docker_build_args: Optional[Sequence[str]] = None + ): + self.redirect_stdout = redirect_stdout + self._docker_build_args = tuple(docker_build_args or ()) + self.active_containers = set() + self._lock = threading.Lock() + self._failed = False + + def run_build(self, dist, skip_tests=False): + """Build deb for a single distribution""" + + if self._failed: + print("not building %s due to earlier failure" % (dist,)) + raise Exception("failed") + + try: + self._inner_build(dist, skip_tests) + except Exception as e: + print("build of %s failed: %s" % (dist, e), file=sys.stderr) + self._failed = True + raise + + def _inner_build(self, dist, skip_tests=False): + tag = dist.split(":", 1)[1] + + # Make the dir where the debs will live. + # + # Note that we deliberately put this outside the source tree, otherwise + # we tend to get source packages which are full of debs. (We could hack + # around that with more magic in the build_debian.sh script, but that + # doesn't solve the problem for natively-run dpkg-buildpakage). 
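+        # In other words, the debs land in a "debs" directory that is a
+        # sibling of the project checkout rather than inside it.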
+ debsdir = os.path.join(projdir, "../debs") + os.makedirs(debsdir, exist_ok=True) + + if self.redirect_stdout: + logfile = os.path.join(debsdir, "%s.buildlog" % (tag,)) + print("building %s: directing output to %s" % (dist, logfile)) + stdout = open(logfile, "w") + else: + stdout = None + + # first build a docker image for the build environment + build_args = ( + ( + "docker", + "build", + "--tag", + "dh-venv-builder:" + tag, + "--build-arg", + "distro=" + dist, + "-f", + "docker/Dockerfile-dhvirtualenv", + ) + + self._docker_build_args + + ("docker",) + ) + + subprocess.check_call( + build_args, + stdout=stdout, + stderr=subprocess.STDOUT, + cwd=projdir, + ) + + container_name = "synapse_build_" + tag + with self._lock: + self.active_containers.add(container_name) + + # then run the build itself + subprocess.check_call( + [ + "docker", + "run", + "--rm", + "--name", + container_name, + "--volume=" + projdir + ":/synapse/source:ro", + "--volume=" + debsdir + ":/debs", + "-e", + "TARGET_USERID=%i" % (os.getuid(),), + "-e", + "TARGET_GROUPID=%i" % (os.getgid(),), + "-e", + "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), + "dh-venv-builder:" + tag, + ], + stdout=stdout, + stderr=subprocess.STDOUT, + ) + + with self._lock: + self.active_containers.remove(container_name) + + if stdout is not None: + stdout.close() + print("Completed build of %s" % (dist,)) + + def kill_containers(self): + with self._lock: + active = list(self.active_containers) + + for c in active: + print("killing container %s" % (c,)) + subprocess.run( + [ + "docker", + "kill", + c, + ], + stdout=subprocess.DEVNULL, + ) + with self._lock: + self.active_containers.remove(c) + + +def run_builds(builder, dists, jobs=1, skip_tests=False): + def sig(signum, _frame): + print("Caught SIGINT") + builder.kill_containers() + + signal.signal(signal.SIGINT, sig) + + with ThreadPoolExecutor(max_workers=jobs) as e: + res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists) + + # make sure we consume the iterable so that exceptions are raised. + for _ in res: + pass + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=DESC, + ) + parser.add_argument( + "-j", + "--jobs", + type=int, + default=1, + help="specify the number of builds to run in parallel", + ) + parser.add_argument( + "--no-check", + action="store_true", + help="skip running tests after building", + ) + parser.add_argument( + "--docker-build-arg", + action="append", + help="specify an argument to pass to docker build", + ) + parser.add_argument( + "--show-dists-json", + action="store_true", + help="instead of building the packages, just list the dists to build for, as a json array", + ) + parser.add_argument( + "dist", + nargs="*", + default=DISTS, + help="a list of distributions to build for. Default: %(default)s", + ) + args = parser.parse_args() + if args.show_dists_json: + print(json.dumps(DISTS)) + else: + builder = Builder( + redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg + ) + run_builds( + builder, + dists=args.dist, + jobs=args.jobs, + skip_tests=args.no_check, + ) diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment deleted file mode 100755 index 493558ad65..0000000000 --- a/scripts-dev/check-newsfragment +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -# -# A script which checks that an appropriate news file has been added on this -# branch. 
- -echo -e "+++ \033[32mChecking newsfragment\033[m" - -set -e - -# make sure that origin/develop is up to date -git remote set-branches --add origin develop -git fetch -q origin develop - -pr="$PULL_REQUEST_NUMBER" - -# if there are changes in the debian directory, check that the debian changelog -# has been updated -if ! git diff --quiet FETCH_HEAD... -- debian; then - if git diff --quiet FETCH_HEAD... -- debian/changelog; then - echo "Updates to debian directory, but no update to the changelog." >&2 - echo "!! Please see the contributing guide for help writing your changelog entry:" >&2 - echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2 - exit 1 - fi -fi - -# if there are changes *outside* the debian directory, check that the -# newsfragments have been updated. -if ! git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then - exit 0 -fi - -# Print a link to the contributing guide if the user makes a mistake -CONTRIBUTING_GUIDE_TEXT="!! Please see the contributing guide for help writing your changelog entry: -https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog" - -# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit -python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1) - -echo -echo "--------------------------" -echo - -matched=0 -for f in $(git diff --diff-filter=d --name-only FETCH_HEAD... -- changelog.d); do - # check that any added newsfiles on this branch end with a full stop. - lastchar=$(tr -d '\n' < "$f" | tail -c 1) - if [ "$lastchar" != '.' ] && [ "$lastchar" != '!' ]; then - echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2 - echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 - exit 1 - fi - - # see if this newsfile corresponds to the right PR - [[ -n "$pr" && "$f" == changelog.d/"$pr".* ]] && matched=1 -done - -if [[ -n "$pr" && "$matched" -eq 0 ]]; then - echo -e "\e[31mERROR: Did not find a news fragment with the right number: expected changelog.d/$pr.*.\e[39m" >&2 - echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 - exit 1 -fi diff --git a/scripts-dev/check-newsfragment.sh b/scripts-dev/check-newsfragment.sh new file mode 100755 index 0000000000..493558ad65 --- /dev/null +++ b/scripts-dev/check-newsfragment.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# +# A script which checks that an appropriate news file has been added on this +# branch. + +echo -e "+++ \033[32mChecking newsfragment\033[m" + +set -e + +# make sure that origin/develop is up to date +git remote set-branches --add origin develop +git fetch -q origin develop + +pr="$PULL_REQUEST_NUMBER" + +# if there are changes in the debian directory, check that the debian changelog +# has been updated +if ! git diff --quiet FETCH_HEAD... -- debian; then + if git diff --quiet FETCH_HEAD... -- debian/changelog; then + echo "Updates to debian directory, but no update to the changelog." >&2 + echo "!! Please see the contributing guide for help writing your changelog entry:" >&2 + echo "https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#debian-changelog" >&2 + exit 1 + fi +fi + +# if there are changes *outside* the debian directory, check that the +# newsfragments have been updated. +if ! git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then + exit 0 +fi + +# Print a link to the contributing guide if the user makes a mistake +CONTRIBUTING_GUIDE_TEXT="!! 
Please see the contributing guide for help writing your changelog entry: +https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog" + +# If check-newsfragment returns a non-zero exit code, print the contributing guide and exit +python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1) + +echo +echo "--------------------------" +echo + +matched=0 +for f in $(git diff --diff-filter=d --name-only FETCH_HEAD... -- changelog.d); do + # check that any added newsfiles on this branch end with a full stop. + lastchar=$(tr -d '\n' < "$f" | tail -c 1) + if [ "$lastchar" != '.' ] && [ "$lastchar" != '!' ]; then + echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2 + echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 + exit 1 + fi + + # see if this newsfile corresponds to the right PR + [[ -n "$pr" && "$f" == changelog.d/"$pr".* ]] && matched=1 +done + +if [[ -n "$pr" && "$matched" -eq 0 ]]; then + echo -e "\e[31mERROR: Did not find a news fragment with the right number: expected changelog.d/$pr.*.\e[39m" >&2 + echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 + exit 1 +fi diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config deleted file mode 100755 index 185e277933..0000000000 --- a/scripts-dev/generate_sample_config +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# -# Update/check the docs/sample_config.yaml - -set -e - -cd "$(dirname "$0")/.." - -SAMPLE_CONFIG="docs/sample_config.yaml" -SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" - -check() { - diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 -} - -if [ "$1" == "--check" ]; then - diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 - exit 1 - } - diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 - exit 1 - } -else - synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" - synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" -fi diff --git a/scripts-dev/generate_sample_config.sh b/scripts-dev/generate_sample_config.sh new file mode 100755 index 0000000000..375897eacb --- /dev/null +++ b/scripts-dev/generate_sample_config.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Update/check the docs/sample_config.yaml + +set -e + +cd "$(dirname "$0")/.." + +SAMPLE_CONFIG="docs/sample_config.yaml" +SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" + +check() { + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 +} + +if [ "$1" == "--check" ]; then + diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { + echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 + exit 1 + } + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { + echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. 
Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 + exit 1 + } +else + synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" + synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" +fi diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index df4d4934d0..2f5f2c3566 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -85,8 +85,6 @@ else "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them "scripts-dev" - "scripts-dev/build_debian_packages" - "scripts-dev/sign_json" "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" ) fi diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json deleted file mode 100755 index 9459543106..0000000000 --- a/scripts-dev/sign_json +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import json -import sys -from json import JSONDecodeError - -import yaml -from signedjson.key import read_signing_keys -from signedjson.sign import sign_json - -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.util import json_encoder - - -def main(): - parser = argparse.ArgumentParser( - description="""Adds a signature to a JSON object. - -Example usage: - - $ scripts-dev/sign_json.py -N test -k localhost.signing.key "{}" - {"signatures":{"test":{"ed25519:a_ZnZh":"LmPnml6iM0iR..."}}} -""", - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - - parser.add_argument( - "-N", - "--server-name", - help="Name to give as the local homeserver. If unspecified, will be " - "read from the config file.", - ) - - parser.add_argument( - "-k", - "--signing-key-path", - help="Path to the file containing the private ed25519 key to sign the " - "request with.", - ) - - parser.add_argument( - "-K", - "--signing-key", - help="The private ed25519 key to sign the request with.", - ) - - parser.add_argument( - "-c", - "--config", - default="homeserver.yaml", - help=( - "Path to synapse config file, from which the server name and/or signing " - "key path will be read. Ignored if --server-name and --signing-key(-path) " - "are both given." - ), - ) - - parser.add_argument( - "--sign-event-room-version", - type=str, - help=( - "Sign the JSON as an event for the given room version, rather than raw JSON. " - "This means that we will add a 'hashes' object, and redact the event before " - "signing." - ), - ) - - input_args = parser.add_mutually_exclusive_group() - - input_args.add_argument("input_data", nargs="?", help="Raw JSON to be signed.") - - input_args.add_argument( - "-i", - "--input", - type=argparse.FileType("r"), - default=sys.stdin, - help=( - "A file from which to read the JSON to be signed. If neither --input nor " - "input_data are given, JSON will be read from stdin." 
- ), - ) - - parser.add_argument( - "-o", - "--output", - type=argparse.FileType("w"), - default=sys.stdout, - help="Where to write the signed JSON. Defaults to stdout.", - ) - - args = parser.parse_args() - - if not args.server_name or not (args.signing_key_path or args.signing_key): - read_args_from_config(args) - - if args.signing_key: - keys = read_signing_keys([args.signing_key]) - else: - with open(args.signing_key_path) as f: - keys = read_signing_keys(f) - - json_to_sign = args.input_data - if json_to_sign is None: - json_to_sign = args.input.read() - - try: - obj = json.loads(json_to_sign) - except JSONDecodeError as e: - print("Unable to parse input as JSON: %s" % e, file=sys.stderr) - sys.exit(1) - - if not isinstance(obj, dict): - print("Input json was not an object", file=sys.stderr) - sys.exit(1) - - if args.sign_event_room_version: - room_version = KNOWN_ROOM_VERSIONS.get(args.sign_event_room_version) - if not room_version: - print( - f"Unknown room version {args.sign_event_room_version}", file=sys.stderr - ) - sys.exit(1) - add_hashes_and_signatures(room_version, obj, args.server_name, keys[0]) - else: - sign_json(obj, args.server_name, keys[0]) - - for c in json_encoder.iterencode(obj): - args.output.write(c) - args.output.write("\n") - - -def read_args_from_config(args: argparse.Namespace) -> None: - with open(args.config, "r") as fh: - config = yaml.safe_load(fh) - if not args.server_name: - args.server_name = config["server_name"] - if not args.signing_key_path and not args.signing_key: - if "signing_key" in config: - args.signing_key = config["signing_key"] - elif "signing_key_path" in config: - args.signing_key_path = config["signing_key_path"] - else: - print( - "A signing key must be given on the commandline or in the config file.", - file=sys.stderr, - ) - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/scripts-dev/sign_json.py b/scripts-dev/sign_json.py new file mode 100755 index 0000000000..9459543106 --- /dev/null +++ b/scripts-dev/sign_json.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import sys +from json import JSONDecodeError + +import yaml +from signedjson.key import read_signing_keys +from signedjson.sign import sign_json + +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.util import json_encoder + + +def main(): + parser = argparse.ArgumentParser( + description="""Adds a signature to a JSON object. + +Example usage: + + $ scripts-dev/sign_json.py -N test -k localhost.signing.key "{}" + {"signatures":{"test":{"ed25519:a_ZnZh":"LmPnml6iM0iR..."}}} +""", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + + parser.add_argument( + "-N", + "--server-name", + help="Name to give as the local homeserver. 
If unspecified, will be " + "read from the config file.", + ) + + parser.add_argument( + "-k", + "--signing-key-path", + help="Path to the file containing the private ed25519 key to sign the " + "request with.", + ) + + parser.add_argument( + "-K", + "--signing-key", + help="The private ed25519 key to sign the request with.", + ) + + parser.add_argument( + "-c", + "--config", + default="homeserver.yaml", + help=( + "Path to synapse config file, from which the server name and/or signing " + "key path will be read. Ignored if --server-name and --signing-key(-path) " + "are both given." + ), + ) + + parser.add_argument( + "--sign-event-room-version", + type=str, + help=( + "Sign the JSON as an event for the given room version, rather than raw JSON. " + "This means that we will add a 'hashes' object, and redact the event before " + "signing." + ), + ) + + input_args = parser.add_mutually_exclusive_group() + + input_args.add_argument("input_data", nargs="?", help="Raw JSON to be signed.") + + input_args.add_argument( + "-i", + "--input", + type=argparse.FileType("r"), + default=sys.stdin, + help=( + "A file from which to read the JSON to be signed. If neither --input nor " + "input_data are given, JSON will be read from stdin." + ), + ) + + parser.add_argument( + "-o", + "--output", + type=argparse.FileType("w"), + default=sys.stdout, + help="Where to write the signed JSON. Defaults to stdout.", + ) + + args = parser.parse_args() + + if not args.server_name or not (args.signing_key_path or args.signing_key): + read_args_from_config(args) + + if args.signing_key: + keys = read_signing_keys([args.signing_key]) + else: + with open(args.signing_key_path) as f: + keys = read_signing_keys(f) + + json_to_sign = args.input_data + if json_to_sign is None: + json_to_sign = args.input.read() + + try: + obj = json.loads(json_to_sign) + except JSONDecodeError as e: + print("Unable to parse input as JSON: %s" % e, file=sys.stderr) + sys.exit(1) + + if not isinstance(obj, dict): + print("Input json was not an object", file=sys.stderr) + sys.exit(1) + + if args.sign_event_room_version: + room_version = KNOWN_ROOM_VERSIONS.get(args.sign_event_room_version) + if not room_version: + print( + f"Unknown room version {args.sign_event_room_version}", file=sys.stderr + ) + sys.exit(1) + add_hashes_and_signatures(room_version, obj, args.server_name, keys[0]) + else: + sign_json(obj, args.server_name, keys[0]) + + for c in json_encoder.iterencode(obj): + args.output.write(c) + args.output.write("\n") + + +def read_args_from_config(args: argparse.Namespace) -> None: + with open(args.config, "r") as fh: + config = yaml.safe_load(fh) + if not args.server_name: + args.server_name = config["server_name"] + if not args.signing_key_path and not args.signing_key: + if "signing_key" in config: + args.signing_key = config["signing_key"] + elif "signing_key_path" in config: + args.signing_key_path = config["signing_key_path"] + else: + print( + "A signing key must be given on the commandline or in the config file.", + file=sys.stderr, + ) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tox.ini b/tox.ini index 8d6aa7580b..f4829200cc 100644 --- a/tox.ini +++ b/tox.ini @@ -40,8 +40,6 @@ lint_targets = tests # annoyingly, black doesn't find these so we have to list them scripts-dev - scripts-dev/build_debian_packages - scripts-dev/sign_json stubs contrib synctl -- cgit 1.5.1 From 11282ade1d8deeafa042a639e2685472d6347e69 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 19:22:44 +0000 Subject: 
Move the `snapcraft` configuration to `contrib`. (#12142) * Move the `snapcraft` configuration to `contrib`. We're happy for people to package this as a snap image if it's useful, but we don't support or maintain it. I'd like to move the config to `contrib` to reflect this state of affairs. * Changelog --- MANIFEST.in | 1 - changelog.d/12142.misc | 1 + contrib/snap/snapcraft.yaml | 57 +++++++++++++++++++++++++++++++++++++++++++++ snap/snapcraft.yaml | 57 --------------------------------------------- 4 files changed, 58 insertions(+), 58 deletions(-) create mode 100644 changelog.d/12142.misc create mode 100644 contrib/snap/snapcraft.yaml delete mode 100644 snap/snapcraft.yaml diff --git a/MANIFEST.in b/MANIFEST.in index 7e903518e1..f1e295e583 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -52,5 +52,4 @@ prune contrib prune debian prune demo/etc prune docker -prune snap prune stubs diff --git a/changelog.d/12142.misc b/changelog.d/12142.misc new file mode 100644 index 0000000000..5d09f90b52 --- /dev/null +++ b/changelog.d/12142.misc @@ -0,0 +1 @@ +Move the snapcraft configuration file to `contrib`. \ No newline at end of file diff --git a/contrib/snap/snapcraft.yaml b/contrib/snap/snapcraft.yaml new file mode 100644 index 0000000000..dd4c8478d5 --- /dev/null +++ b/contrib/snap/snapcraft.yaml @@ -0,0 +1,57 @@ +name: matrix-synapse +base: core18 +version: git +summary: Reference Matrix homeserver +description: | + Synapse is the reference Matrix homeserver. + Matrix is a federated and decentralised instant messaging and VoIP system. + +grade: stable +confinement: strict + +apps: + matrix-synapse: + command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml + stop-command: synctl -c $SNAP_COMMON stop + plugs: [network-bind, network] + daemon: simple + hash-password: + command: hash_password + generate-config: + command: generate_config + generate-signing-key: + command: generate_signing_key + register-new-matrix-user: + command: register_new_matrix_user + plugs: [network] + synctl: + command: synctl +parts: + matrix-synapse: + source: . + plugin: python + python-version: python3 + python-packages: + - '.[all]' + - pip + - setuptools + - setuptools-scm + - wheel + build-packages: + - libffi-dev + - libturbojpeg0-dev + - libssl-dev + - libxslt1-dev + - libpq-dev + - zlib1g-dev + stage-packages: + - libasn1-8-heimdal + - libgssapi3-heimdal + - libhcrypto4-heimdal + - libheimbase1-heimdal + - libheimntlm0-heimdal + - libhx509-5-heimdal + - libkrb5-26-heimdal + - libldap-2.4-2 + - libpq5 + - libsasl2-2 diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml deleted file mode 100644 index dd4c8478d5..0000000000 --- a/snap/snapcraft.yaml +++ /dev/null @@ -1,57 +0,0 @@ -name: matrix-synapse -base: core18 -version: git -summary: Reference Matrix homeserver -description: | - Synapse is the reference Matrix homeserver. - Matrix is a federated and decentralised instant messaging and VoIP system. - -grade: stable -confinement: strict - -apps: - matrix-synapse: - command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml - stop-command: synctl -c $SNAP_COMMON stop - plugs: [network-bind, network] - daemon: simple - hash-password: - command: hash_password - generate-config: - command: generate_config - generate-signing-key: - command: generate_signing_key - register-new-matrix-user: - command: register_new_matrix_user - plugs: [network] - synctl: - command: synctl -parts: - matrix-synapse: - source: . 
- plugin: python - python-version: python3 - python-packages: - - '.[all]' - - pip - - setuptools - - setuptools-scm - - wheel - build-packages: - - libffi-dev - - libturbojpeg0-dev - - libssl-dev - - libxslt1-dev - - libpq-dev - - zlib1g-dev - stage-packages: - - libasn1-8-heimdal - - libgssapi3-heimdal - - libhcrypto4-heimdal - - libheimbase1-heimdal - - libheimntlm0-heimdal - - libhx509-5-heimdal - - libkrb5-26-heimdal - - libldap-2.4-2 - - libpq5 - - libsasl2-2 -- cgit 1.5.1 From ae8a616b4926a005cd73db835a1ebcea4f5cbee0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 3 Mar 2022 11:39:58 +0100 Subject: Correctly register deactivation and profile update module callbacks (#12141) --- changelog.d/12141.bugfix | 1 + synapse/events/third_party_rules.py | 10 +++++++--- synapse/module_api/__init__.py | 8 ++++++++ 3 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12141.bugfix diff --git a/changelog.d/12141.bugfix b/changelog.d/12141.bugfix new file mode 100644 index 0000000000..03a2507e2c --- /dev/null +++ b/changelog.d/12141.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index dd3104faf3..ede72ee876 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -174,7 +174,9 @@ class ThirdPartyEventRules: ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, - on_deactivation: Optional[ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK] = None, + on_user_deactivation_status_changed: Optional[ + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK + ] = None, ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -199,8 +201,10 @@ class ThirdPartyEventRules: if on_profile_update is not None: self._on_profile_update_callbacks.append(on_profile_update) - if on_deactivation is not None: - self._on_user_deactivation_status_changed_callbacks.append(on_deactivation) + if on_user_deactivation_status_changed is not None: + self._on_user_deactivation_status_changed_callbacks.append( + on_user_deactivation_status_changed, + ) async def check_event_allowed( self, event: EventBase, context: EventContext diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7e46931869..c42eeedd87 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -59,6 +59,8 @@ from synapse.events.third_party_rules import ( CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, ON_CREATE_ROOM_CALLBACK, ON_NEW_EVENT_CALLBACK, + ON_PROFILE_UPDATE_CALLBACK, + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, ) from synapse.handlers.account_validity import ( IS_USER_EXPIRED_CALLBACK, @@ -281,6 +283,10 @@ class ModuleApi: CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, + on_user_deactivation_status_changed: Optional[ + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK + ] = None, ) -> None: """Registers callbacks for third party event rules capabilities. 
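For context, a module would exercise the two newly registered hooks roughly as follows. This is a minimal sketch: the registration call and keyword names come from this diff, while the callback signatures and the module name are illustrative assumptions.

```python
class ExampleModule:
    def __init__(self, config: dict, api):
        # `api` is the ModuleApi instance Synapse hands to modules at startup.
        # Before this fix, passing these two keyword arguments raised a
        # TypeError, as register_third_party_rules_callbacks did not accept them.
        api.register_third_party_rules_callbacks(
            on_profile_update=self.on_profile_update,
            on_user_deactivation_status_changed=self.on_deactivation_change,
        )

    async def on_profile_update(self, user_id, new_profile, by_admin, deactivation):
        # Illustrative signature: invoked when a user's profile is updated.
        print(f"profile updated for {user_id}")

    async def on_deactivation_change(self, user_id, deactivated, by_admin):
        # Illustrative signature: invoked when a user's deactivation status changes.
        print(f"{user_id} deactivated={deactivated}")
```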
@@ -292,6 +298,8 @@ class ModuleApi: check_threepid_can_be_invited=check_threepid_can_be_invited, check_visibility_can_be_modified=check_visibility_can_be_modified, on_new_event=on_new_event, + on_profile_update=on_profile_update, + on_user_deactivation_status_changed=on_user_deactivation_status_changed, ) def register_presence_router_callbacks( -- cgit 1.5.1 From 31b125ccec75e708b09f40205c8cfe692edfa6b4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 3 Mar 2022 04:45:23 -0600 Subject: Enable MSC3030 Complement tests in Synapse (#12144) The Complement tests for MSC3030 are now merged, https://github.com/matrix-org/complement/pull/178 Synapse implmentation: https://github.com/matrix-org/synapse/pull/9445 --- .github/workflows/tests.yml | 2 +- changelog.d/12144.misc | 1 + scripts-dev/complement.sh | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12144.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3f4e44ca59..fa9611d42b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -376,7 +376,7 @@ jobs: # Run Complement - run: | set -o pipefail - go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt + go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc3030 ./tests/... 2>&1 | gotestfmt shell: bash name: Run Complement Tests env: diff --git a/changelog.d/12144.misc b/changelog.d/12144.misc new file mode 100644 index 0000000000..d8f71bb203 --- /dev/null +++ b/changelog.d/12144.misc @@ -0,0 +1 @@ +Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 0aecb3daf1..e3d3e0f293 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -71,4 +71,4 @@ fi # Run the tests! echo "Images built; running complement" -go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... +go test -v -tags synapse_blacklist,msc2403,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... -- cgit 1.5.1 From 61fd2a8f591f20fe9d1cffe659336664bf44e742 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 3 Mar 2022 10:52:35 +0000 Subject: Limit the size of the aggregation_key (#12101) There's no reason to let people use long keys. --- changelog.d/12101.misc | 1 + synapse/handlers/message.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/12101.misc diff --git a/changelog.d/12101.misc b/changelog.d/12101.misc new file mode 100644 index 0000000000..d165f73d13 --- /dev/null +++ b/changelog.d/12101.misc @@ -0,0 +1 @@ +Limit the size of `aggregation_key` on annotations. 
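In client terms, the change below caps the `key` of an `m.annotation` relation (for example, a reaction emoji) at 500 characters. A rough sketch of the observable effect, assuming the `requests` library; the homeserver URL, room and event IDs, and access token are placeholders:

```python
import requests  # third-party HTTP client, used here purely for illustration

content = {
    "m.relates_to": {
        "rel_type": "m.annotation",
        "event_id": "$target_event_id",  # placeholder target event
        "key": "x" * 501,  # one character over the new 500-character limit
    }
}
resp = requests.put(
    "https://hs.example/_matrix/client/v3/rooms/!room:hs.example/send/m.reaction/txn1",
    headers={"Authorization": "Bearer <access_token>"},  # placeholder token
    json=content,
)
assert resp.status_code == 400  # rejected: "Aggregation key is too long"
```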
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 61cb133ef2..0799ec9a84 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1069,6 +1069,9 @@ class EventCreationHandler: if relation_type == RelationTypes.ANNOTATION: aggregation_key = relation["key"] + if len(aggregation_key) > 500: + raise SynapseError(400, "Aggregation key is too long") + already_exists = await self.store.has_user_annotated_event( relates_to, event.type, aggregation_key, event.sender ) -- cgit 1.5.1 From a511a890d7c556ad357d27443e5665e6cc25e0b5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 3 Mar 2022 05:19:20 -0600 Subject: Enable MSC2716 Complement tests in Synapse (#12145) Co-authored-by: Brendan Abolivier --- .github/workflows/tests.yml | 2 +- changelog.d/12145.misc | 1 + scripts-dev/complement.sh | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12145.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fa9611d42b..c89c50cd07 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -376,7 +376,7 @@ jobs: # Run Complement - run: | set -o pipefail - go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc3030 ./tests/... 2>&1 | gotestfmt + go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt shell: bash name: Run Complement Tests env: diff --git a/changelog.d/12145.misc b/changelog.d/12145.misc new file mode 100644 index 0000000000..4092a2d66e --- /dev/null +++ b/changelog.d/12145.misc @@ -0,0 +1 @@ +Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index e3d3e0f293..0a79a4063f 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -71,4 +71,4 @@ fi # Run the tests! echo "Images built; running complement" -go test -v -tags synapse_blacklist,msc2403,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... +go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... -- cgit 1.5.1 From cea1b58c4a5850a59e14808324ce1d9f222f52e5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 3 Mar 2022 12:47:55 +0000 Subject: Don't impose version checks on dev extras at runtime (#12129) * Fix incorrect argument in test case * Add copyright header * Docstring and __all__ * Exclude dev depenencies * Use changelog from #12088 * Include version in error messages This will hopefully distinguish between the version of the source code and the version of the distribution package that is installed. * Linter script is your friend --- changelog.d/12129.misc | 1 + synapse/util/check_dependencies.py | 61 +++++++++++++++++++++++++++++++---- tests/util/test_check_dependencies.py | 21 ++++++++++-- 3 files changed, 74 insertions(+), 9 deletions(-) create mode 100644 changelog.d/12129.misc diff --git a/changelog.d/12129.misc b/changelog.d/12129.misc new file mode 100644 index 0000000000..ce4213650c --- /dev/null +++ b/changelog.d/12129.misc @@ -0,0 +1 @@ +Inspect application dependencies using `importlib.metadata` or its backport. \ No newline at end of file diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 3a1f6b3c75..39b0a91db3 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -1,3 +1,25 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +This module exposes a single function which checks synapse's dependencies are present +and correctly versioned. It makes use of `importlib.metadata` to do so. The details +are a bit murky: there's no easy way to get a map from "extras" to the packages they +require. But this is probably just symptomatic of Python's package management. +""" + import logging from typing import Iterable, NamedTuple, Optional @@ -10,6 +32,8 @@ try: except ImportError: import importlib_metadata as metadata # type: ignore[no-redef] +__all__ = ["check_requirements"] + class DependencyException(Exception): @property @@ -29,7 +53,17 @@ class DependencyException(Exception): yield '"' + i + '"' -EXTRAS = set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra")) +DEV_EXTRAS = {"lint", "mypy", "test", "dev"} +RUNTIME_EXTRAS = ( + set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra")) - DEV_EXTRAS +) +VERSION = metadata.version(DISTRIBUTION_NAME) + + +def _is_dev_dependency(req: Requirement) -> bool: + return req.marker is not None and any( + req.marker.evaluate({"extra": e}) for e in DEV_EXTRAS + ) class Dependency(NamedTuple): @@ -43,6 +77,9 @@ def _generic_dependencies() -> Iterable[Dependency]: assert requirements is not None for raw_requirement in requirements: req = Requirement(raw_requirement) + if _is_dev_dependency(req): + continue + # https://packaging.pypa.io/en/latest/markers.html#usage notes that # > Evaluating an extra marker with no environment is an error # so we pass in a dummy empty extra value here. @@ -56,6 +93,8 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]: assert requirements is not None for raw_requirement in requirements: req = Requirement(raw_requirement) + if _is_dev_dependency(req): + continue # Exclude mandatory deps by only selecting deps needed with this extra. 
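+        # (Extra-only requirements carry an environment marker of the form
+        # `extra == "<name>"`, which is what the evaluate() calls below test.)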
if ( req.marker is not None @@ -67,18 +106,26 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]: def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str: if extra: - return f"Need {requirement.name} for {extra}, but it is not installed" + return ( + f"Synapse {VERSION} needs {requirement.name} for {extra}, " + f"but it is not installed" + ) else: - return f"Need {requirement.name}, but it is not installed" + return f"Synapse {VERSION} needs {requirement.name}, but it is not installed" def _incorrect_version( requirement: Requirement, got: str, extra: Optional[str] = None ) -> str: if extra: - return f"Need {requirement} for {extra}, but got {requirement.name}=={got}" + return ( + f"Synapse {VERSION} needs {requirement} for {extra}, " + f"but got {requirement.name}=={got}" + ) else: - return f"Need {requirement}, but got {requirement.name}=={got}" + return ( + f"Synapse {VERSION} needs {requirement}, but got {requirement.name}=={got}" + ) def check_requirements(extra: Optional[str] = None) -> None: @@ -100,10 +147,10 @@ def check_requirements(extra: Optional[str] = None) -> None: # First work out which dependencies are required, and which are optional. if extra is None: dependencies = _generic_dependencies() - elif extra in EXTRAS: + elif extra in RUNTIME_EXTRAS: dependencies = _dependencies_for_extra(extra) else: - raise ValueError(f"Synapse does not provide the feature '{extra}'") + raise ValueError(f"Synapse {VERSION} does not provide the feature '{extra}'") deps_unfulfilled = [] errors = [] diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index 3c07252252..a91c33272f 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -65,6 +65,23 @@ class TestDependencyChecker(TestCase): # should not raise check_requirements() + def test_checks_ignore_dev_dependencies(self) -> None: + """Bot generic and per-extra checks should ignore dev dependencies.""" + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'mypy'"], + ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): + # We're testing that none of these calls raise. + with self.mock_installed_package(None): + check_requirements() + check_requirements("cool-extra") + with self.mock_installed_package(old): + check_requirements() + check_requirements("cool-extra") + with self.mock_installed_package(new): + check_requirements() + check_requirements("cool-extra") + def test_generic_check_of_optional_dependency(self) -> None: """Complain if an optional package is old.""" with patch( @@ -85,11 +102,11 @@ class TestDependencyChecker(TestCase): with patch( "synapse.util.check_dependencies.metadata.requires", return_value=["dummypkg >= 1; extra == 'cool-extra'"], - ), patch("synapse.util.check_dependencies.EXTRAS", {"cool-extra"}): + ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): with self.mock_installed_package(None): self.assertRaises(DependencyException, check_requirements, "cool-extra") with self.mock_installed_package(old): self.assertRaises(DependencyException, check_requirements, "cool-extra") with self.mock_installed_package(new): # should not raise - check_requirements() + check_requirements("cool-extra") -- cgit 1.5.1 From 1d11b452b70c768e4919bd9cf6bcaeda2050a3d4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 3 Mar 2022 10:43:06 -0500 Subject: Use the proper serialization format when bundling aggregations. 
(#12090) This ensures that the `latest_event` field of the bundled aggregation for threads uses the same format as the other events in the response. --- changelog.d/12090.bugfix | 1 + synapse/appservice/api.py | 24 ++++--- synapse/events/utils.py | 81 ++++++++++++++------- synapse/handlers/events.py | 3 +- synapse/handlers/initial_sync.py | 9 ++- synapse/handlers/pagination.py | 7 +- synapse/rest/client/notifications.py | 9 ++- synapse/rest/client/sync.py | 132 ++++++++++------------------------- tests/events/test_utils.py | 5 +- tests/rest/client/test_relations.py | 2 - 10 files changed, 130 insertions(+), 143 deletions(-) create mode 100644 changelog.d/12090.bugfix diff --git a/changelog.d/12090.bugfix b/changelog.d/12090.bugfix new file mode 100644 index 0000000000..087065dcb1 --- /dev/null +++ b/changelog.d/12090.bugfix @@ -0,0 +1 @@ +Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index a0ea958af6..98fe354014 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -25,7 +25,7 @@ from synapse.appservice import ( TransactionUnusedFallbackKeys, ) from synapse.events import EventBase -from synapse.events.utils import serialize_event +from synapse.events.utils import SerializeEventConfig, serialize_event from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, ThirdPartyInstanceID from synapse.util.caches.response_cache import ResponseCache @@ -321,16 +321,18 @@ class ApplicationServiceApi(SimpleHttpClient): serialize_event( e, time_now, - as_client_event=True, - # If this is an invite or a knock membership event, and we're interested - # in this user, then include any stripped state alongside the event. - include_stripped_room_state=( - e.type == EventTypes.Member - and ( - e.membership == Membership.INVITE - or e.membership == Membership.KNOCK - ) - and service.is_interested_in_user(e.state_key) + config=SerializeEventConfig( + as_client_event=True, + # If this is an invite or a knock membership event, and we're interested + # in this user, then include any stripped state alongside the event. + include_stripped_room_state=( + e.type == EventTypes.Member + and ( + e.membership == Membership.INVITE + or e.membership == Membership.KNOCK + ) + and service.is_interested_in_user(e.state_key) + ), ), ) for e in events diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9386fa29dd..ee34cb46e4 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -26,6 +26,7 @@ from typing import ( Union, ) +import attr from frozendict import frozendict from synapse.api.constants import EventContentFields, EventTypes, RelationTypes @@ -303,29 +304,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict: return d +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SerializeEventConfig: + as_client_event: bool = True + # Function to convert from federation format to client format + event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1 + # ID of the user's auth token - used for namespacing of transaction IDs + token_id: Optional[int] = None + # List of event fields to include. If empty, all fields will be returned. + only_event_fields: Optional[List[str]] = None + # Some events can have stripped room state stored in the `unsigned` field. + # This is required for invite and knock functionality. 
If this option is + # False, that state will be removed from the event before it is returned. + # Otherwise, it will be kept. + include_stripped_room_state: bool = False + + +_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig() + + def serialize_event( e: Union[JsonDict, EventBase], time_now_ms: int, *, - as_client_event: bool = True, - event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1, - token_id: Optional[str] = None, - only_event_fields: Optional[List[str]] = None, - include_stripped_room_state: bool = False, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, ) -> JsonDict: """Serialize event for clients Args: e time_now_ms - as_client_event - event_format - token_id - only_event_fields - include_stripped_room_state: Some events can have stripped room state - stored in the `unsigned` field. This is required for invite and knock - functionality. If this option is False, that state will be removed from the - event before it is returned. Otherwise, it will be kept. + config: Event serialization config Returns: The serialized event dictionary. @@ -348,11 +357,11 @@ def serialize_event( if "redacted_because" in e.unsigned: d["unsigned"]["redacted_because"] = serialize_event( - e.unsigned["redacted_because"], time_now_ms, event_format=event_format + e.unsigned["redacted_because"], time_now_ms, config=config ) - if token_id is not None: - if token_id == getattr(e.internal_metadata, "token_id", None): + if config.token_id is not None: + if config.token_id == getattr(e.internal_metadata, "token_id", None): txn_id = getattr(e.internal_metadata, "txn_id", None) if txn_id is not None: d["unsigned"]["transaction_id"] = txn_id @@ -361,13 +370,14 @@ def serialize_event( # that are meant to provide metadata about a room to an invitee/knocker. They are # intended to only be included in specific circumstances, such as down sync, and # should not be included in any other case. - if not include_stripped_room_state: + if not config.include_stripped_room_state: d["unsigned"].pop("invite_room_state", None) d["unsigned"].pop("knock_room_state", None) - if as_client_event: - d = event_format(d) + if config.as_client_event: + d = config.event_format(d) + only_event_fields = config.only_event_fields if only_event_fields: if not isinstance(only_event_fields, list) or not all( isinstance(f, str) for f in only_event_fields @@ -390,18 +400,18 @@ class EventClientSerializer: event: Union[JsonDict, EventBase], time_now: int, *, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, - **kwargs: Any, ) -> JsonDict: """Serializes a single event. Args: event: The event being serialized. time_now: The current time in milliseconds + config: Event serialization config bundle_aggregations: Whether to include the bundled aggregations for this event. Only applies to non-state events. (State events never include bundled aggregations.) - **kwargs: Arguments to pass to `serialize_event` Returns: The serialized event @@ -410,7 +420,7 @@ class EventClientSerializer: if not isinstance(event, EventBase): return event - serialized_event = serialize_event(event, time_now, **kwargs) + serialized_event = serialize_event(event, time_now, config=config) # Check if there are any bundled aggregations to include with the event. 
if bundle_aggregations: @@ -419,6 +429,7 @@ class EventClientSerializer: self._inject_bundled_aggregations( event, time_now, + config, bundle_aggregations[event.event_id], serialized_event, ) @@ -456,6 +467,7 @@ class EventClientSerializer: self, event: EventBase, time_now: int, + config: SerializeEventConfig, aggregations: "BundledAggregations", serialized_event: JsonDict, ) -> None: @@ -466,6 +478,7 @@ class EventClientSerializer: time_now: The current time in milliseconds aggregations: The bundled aggregation to serialize. serialized_event: The serialized event which may be modified. + config: Event serialization config """ serialized_aggregations = {} @@ -493,8 +506,8 @@ class EventClientSerializer: thread = aggregations.thread # Don't bundle aggregations as this could recurse forever. - serialized_latest_event = self.serialize_event( - thread.latest_event, time_now, bundle_aggregations=None + serialized_latest_event = serialize_event( + thread.latest_event, time_now, config=config ) # Manually apply an edit, if one exists. if thread.latest_edit: @@ -515,20 +528,34 @@ class EventClientSerializer: ) def serialize_events( - self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any + self, + events: Iterable[Union[JsonDict, EventBase]], + time_now: int, + *, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, + bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, ) -> List[JsonDict]: """Serializes multiple events. Args: event time_now: The current time in milliseconds - **kwargs: Arguments to pass to `serialize_event` + config: Event serialization config + bundle_aggregations: Whether to include the bundled aggregations for this + event. Only applies to non-state events. (State events never include + bundled aggregations.) 
Returns: The list of serialized events """ return [ - self.serialize_event(event, time_now=time_now, **kwargs) for event in events + self.serialize_event( + event, + time_now, + config=config, + bundle_aggregations=bundle_aggregations, + ) + for event in events ] diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 97e75e60c3..d2ccb5c5d3 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase +from synapse.events.utils import SerializeEventConfig from synapse.handlers.presence import format_user_presence_state from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, UserID @@ -120,7 +121,7 @@ class EventStreamHandler: chunks = self._event_serializer.serialize_events( events, time_now, - as_client_event=as_client_event, + config=SerializeEventConfig(as_client_event=as_client_event), ) chunk = { diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 344f20f37c..316cfae24f 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events import EventBase +from synapse.events.utils import SerializeEventConfig from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state from synapse.handlers.receipts import ReceiptEventSource @@ -156,6 +157,8 @@ class InitialSyncHandler: if limit is None: limit = 10 + serializer_options = SerializeEventConfig(as_client_event=as_client_event) + async def handle_room(event: RoomsForUser) -> None: d: JsonDict = { "room_id": event.room_id, @@ -173,7 +176,7 @@ class InitialSyncHandler: d["invite"] = self._event_serializer.serialize_event( invite_event, time_now, - as_client_event=as_client_event, + config=serializer_options, ) rooms_ret.append(d) @@ -225,7 +228,7 @@ class InitialSyncHandler: self._event_serializer.serialize_events( messages, time_now=time_now, - as_client_event=as_client_event, + config=serializer_options, ) ), "start": await start_token.to_string(self.store), @@ -235,7 +238,7 @@ class InitialSyncHandler: d["state"] = self._event_serializer.serialize_events( current_state.values(), time_now=time_now, - as_client_event=as_client_event, + config=serializer_options, ) account_data_events = [] diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 5c01a426ff..183fabcfc0 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -22,6 +22,7 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter +from synapse.events.utils import SerializeEventConfig from synapse.handlers.room import ShutdownRoomResponse from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.state import StateFilter @@ -541,13 +542,15 @@ class PaginationHandler: time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(as_client_event=as_client_event) + chunk = { "chunk": ( self._event_serializer.serialize_events( events, 
time_now, + config=serialize_options, bundle_aggregations=aggregations, - as_client_event=as_client_event, ) ), "start": await from_token.to_string(self.store), @@ -556,7 +559,7 @@ class PaginationHandler: if state: chunk["state"] = self._event_serializer.serialize_events( - state, time_now, as_client_event=as_client_event + state, time_now, config=serialize_options ) return chunk diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 20377a9ac6..ff040de6b8 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -16,7 +16,10 @@ import logging from typing import TYPE_CHECKING, Tuple from synapse.api.constants import ReceiptTypes -from synapse.events.utils import format_event_for_client_v2_without_room_id +from synapse.events.utils import ( + SerializeEventConfig, + format_event_for_client_v2_without_room_id, +) from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest @@ -75,7 +78,9 @@ class NotificationsServlet(RestServlet): self._event_serializer.serialize_event( notif_events[pa.event_id], self.clock.time_msec(), - event_format=format_event_for_client_v2_without_room_id, + config=SerializeEventConfig( + event_format=format_event_for_client_v2_without_room_id + ), ) ), } diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index f3018ff690..53c385a86c 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -14,24 +14,14 @@ import itertools import logging from collections import defaultdict -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterable, - List, - Optional, - Tuple, - Union, -) +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from synapse.api.constants import Membership, PresenceState from synapse.api.errors import Codes, StoreError, SynapseError from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState -from synapse.events import EventBase from synapse.events.utils import ( + SerializeEventConfig, format_event_for_client_v2_without_room_id, format_event_raw, ) @@ -48,7 +38,6 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace -from synapse.storage.databases.main.relations import BundledAggregations from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -239,28 +228,31 @@ class SyncRestServlet(RestServlet): else: raise Exception("Unknown event format %s" % (filter.event_format,)) + serialize_options = SerializeEventConfig( + event_format=event_formatter, + token_id=access_token_id, + only_event_fields=filter.event_fields, + ) + stripped_serialize_options = SerializeEventConfig( + event_format=event_formatter, + token_id=access_token_id, + include_stripped_room_state=True, + ) + joined = await self.encode_joined( - sync_result.joined, - time_now, - access_token_id, - filter.event_fields, - event_formatter, + sync_result.joined, time_now, serialize_options ) invited = await self.encode_invited( - sync_result.invited, time_now, access_token_id, event_formatter + sync_result.invited, time_now, stripped_serialize_options ) knocked = await self.encode_knocked( - sync_result.knocked, time_now, access_token_id, event_formatter + sync_result.knocked, time_now, 
stripped_serialize_options ) archived = await self.encode_archived( - sync_result.archived, - time_now, - access_token_id, - filter.event_fields, - event_formatter, + sync_result.archived, time_now, serialize_options ) logger.debug("building sync response dict") @@ -339,9 +331,7 @@ class SyncRestServlet(RestServlet): self, rooms: List[JoinedSyncResult], time_now: int, - token_id: Optional[int], - event_fields: List[str], - event_formatter: Callable[[JsonDict], JsonDict], + serialize_options: SerializeEventConfig, ) -> JsonDict: """ Encode the joined rooms in a sync result @@ -349,24 +339,14 @@ class SyncRestServlet(RestServlet): Args: rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations - token_id: ID of the user's auth token - used for namespacing - of transaction IDs - event_fields: List of event fields to include. If empty, - all fields will be returned. - event_formatter: function to convert from federation format - to client format + serialize_options: Event serializer options Returns: The joined rooms list, in our response format """ joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, - time_now, - token_id, - joined=True, - only_fields=event_fields, - event_formatter=event_formatter, + room, time_now, joined=True, serialize_options=serialize_options ) return joined @@ -376,8 +356,7 @@ class SyncRestServlet(RestServlet): self, rooms: List[InvitedSyncResult], time_now: int, - token_id: Optional[int], - event_formatter: Callable[[JsonDict], JsonDict], + serialize_options: SerializeEventConfig, ) -> JsonDict: """ Encode the invited rooms in a sync result @@ -385,10 +364,7 @@ class SyncRestServlet(RestServlet): Args: rooms: list of sync results for rooms this user is invited to time_now: current time - used as a baseline for age calculations - token_id: ID of the user's auth token - used for namespacing - of transaction IDs - event_formatter: function to convert from federation format - to client format + serialize_options: Event serializer options Returns: The invited rooms list, in our response format @@ -396,11 +372,7 @@ class SyncRestServlet(RestServlet): invited = {} for room in rooms: invite = self._event_serializer.serialize_event( - room.invite, - time_now, - token_id=token_id, - event_format=event_formatter, - include_stripped_room_state=True, + room.invite, time_now, config=serialize_options ) unsigned = dict(invite.get("unsigned", {})) invite["unsigned"] = unsigned @@ -415,8 +387,7 @@ class SyncRestServlet(RestServlet): self, rooms: List[KnockedSyncResult], time_now: int, - token_id: Optional[int], - event_formatter: Callable[[Dict], Dict], + serialize_options: SerializeEventConfig, ) -> Dict[str, Dict[str, Any]]: """ Encode the rooms we've knocked on in a sync result. @@ -424,8 +395,7 @@ class SyncRestServlet(RestServlet): Args: rooms: list of sync results for rooms this user is knocking on time_now: current time - used as a baseline for age calculations - token_id: ID of the user's auth token - used for namespacing of transaction IDs - event_formatter: function to convert from federation format to client format + serialize_options: Event serializer options Returns: The list of rooms the user has knocked on, in our response format. 
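The change running through sync.py above replaces five separate keyword arguments (as_client_event, event_format, token_id, only_event_fields, include_stripped_room_state) with a single frozen SerializeEventConfig that is built once per request and threaded through every encode_* helper. A minimal standalone sketch of that pattern, using stdlib dataclasses in place of attrs and a toy serializer; the names below are illustrative, not Synapse's real code:

from dataclasses import dataclass
from typing import Callable, Dict, List, Optional

def _identity(d: Dict) -> Dict:
    return d

@dataclass(frozen=True)
class SerializeConfig:
    # Mirrors the shape of SerializeEventConfig, with toy types.
    as_client_event: bool = True
    event_format: Callable[[Dict], Dict] = _identity
    token_id: Optional[int] = None
    only_event_fields: Optional[List[str]] = None
    include_stripped_room_state: bool = False

_DEFAULT_CONFIG = SerializeConfig()

def serialize(event: Dict, time_now_ms: int, *, config: SerializeConfig = _DEFAULT_CONFIG) -> Dict:
    d = dict(event)
    d["age_ts"] = time_now_ms
    if not config.include_stripped_room_state:
        # Stripped state is only wanted for invites/knocks.
        d.pop("invite_room_state", None)
    if config.as_client_event:
        d = config.event_format(d)
    if config.only_event_fields:
        d = {k: v for k, v in d.items() if k in config.only_event_fields}
    return d

# Build the options once, then thread the same object through every helper:
opts = SerializeConfig(only_event_fields=["type", "content"])
print(serialize({"type": "m.room.test", "content": {}}, 0, config=opts))

Freezing the config makes a single instance safe to share across helpers and safe to keep as a module-level default.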
@@ -433,11 +403,7 @@ class SyncRestServlet(RestServlet): knocked = {} for room in rooms: knock = self._event_serializer.serialize_event( - room.knock, - time_now, - token_id=token_id, - event_format=event_formatter, - include_stripped_room_state=True, + room.knock, time_now, config=serialize_options ) # Extract the `unsigned` key from the knock event. @@ -470,9 +436,7 @@ class SyncRestServlet(RestServlet): self, rooms: List[ArchivedSyncResult], time_now: int, - token_id: Optional[int], - event_fields: List[str], - event_formatter: Callable[[JsonDict], JsonDict], + serialize_options: SerializeEventConfig, ) -> JsonDict: """ Encode the archived rooms in a sync result @@ -480,23 +444,14 @@ class SyncRestServlet(RestServlet): Args: rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations - token_id: ID of the user's auth token - used for namespacing - of transaction IDs - event_fields: List of event fields to include. If empty, - all fields will be returned. - event_formatter: function to convert from federation format to client format + serialize_options: Event serializer options Returns: The archived rooms list, in our response format """ joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, - time_now, - token_id, - joined=False, - only_fields=event_fields, - event_formatter=event_formatter, + room, time_now, joined=False, serialize_options=serialize_options ) return joined @@ -505,10 +460,8 @@ class SyncRestServlet(RestServlet): self, room: Union[JoinedSyncResult, ArchivedSyncResult], time_now: int, - token_id: Optional[int], joined: bool, - only_fields: Optional[List[str]], - event_formatter: Callable[[JsonDict], JsonDict], + serialize_options: SerializeEventConfig, ) -> JsonDict: """ Args: @@ -524,20 +477,6 @@ class SyncRestServlet(RestServlet): Returns: The room, encoded in our response format """ - - def serialize( - events: Iterable[EventBase], - aggregations: Optional[Dict[str, BundledAggregations]] = None, - ) -> List[JsonDict]: - return self._event_serializer.serialize_events( - events, - time_now=time_now, - bundle_aggregations=aggregations, - token_id=token_id, - event_format=event_formatter, - only_event_fields=only_fields, - ) - state_dict = room.state timeline_events = room.timeline.events @@ -554,9 +493,14 @@ class SyncRestServlet(RestServlet): event.room_id, ) - serialized_state = serialize(state_events) - serialized_timeline = serialize( - timeline_events, room.timeline.bundled_aggregations + serialized_state = self._event_serializer.serialize_events( + state_events, time_now, config=serialize_options + ) + serialized_timeline = self._event_serializer.serialize_events( + timeline_events, + time_now, + config=serialize_options, + bundle_aggregations=room.timeline.bundled_aggregations, ) account_data = room.account_data diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 45e3395b33..00ad19e446 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -16,6 +16,7 @@ from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.utils import ( + SerializeEventConfig, copy_power_levels_contents, prune_event, serialize_event, @@ -392,7 +393,9 @@ class PruneEventTestCase(unittest.TestCase): class SerializeEventTestCase(unittest.TestCase): def serialize(self, ev, fields): - return serialize_event(ev, 1479807801915, 
only_event_fields=fields) + return serialize_event( + ev, 1479807801915, config=SerializeEventConfig(only_event_fields=fields) + ) def test_event_fields_works_with_keys(self): self.assertEqual( diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 709f851a38..53062b41de 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -704,10 +704,8 @@ class RelationsTestCase(BaseRelationsTestCase): } }, "event_id": thread_2, - "room_id": self.room, "sender": self.user_id, "type": "m.room.test", - "user_id": self.user_id, }, relations_dict[RelationTypes.THREAD].get("latest_event"), ) -- cgit 1.5.1 From 7e91107be1a4287873266e588a3c5b415279f4c8 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 3 Mar 2022 17:05:44 +0100 Subject: Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey` --- changelog.d/12146.misc | 1 + mypy.ini | 7 +--- tests/rest/key/v2/test_remote_key_resource.py | 44 ++++++++++++++-------- tests/rest/media/v1/test_base.py | 4 +- tests/rest/media/v1/test_filepath.py | 48 ++++++++++++------------ tests/rest/media/v1/test_html_preview.py | 54 +++++++++++++-------------- tests/rest/media/v1/test_oembed.py | 10 ++--- tests/rest/test_health.py | 8 ++-- tests/rest/test_well_known.py | 20 +++++----- 9 files changed, 104 insertions(+), 92 deletions(-) create mode 100644 changelog.d/12146.misc diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc new file mode 100644 index 0000000000..3ca7c47212 --- /dev/null +++ b/changelog.d/12146.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest`. diff --git a/mypy.ini b/mypy.ini index 10971b7225..481e8a5366 100644 --- a/mypy.ini +++ b/mypy.ini @@ -89,8 +89,6 @@ exclude = (?x) |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py - |tests/rest/key/v2/test_remote_key_resource.py - |tests/rest/media/v1/test_base.py |tests/rest/media/v1/test_media_storage.py |tests/rest/media/v1/test_url_preview.py |tests/scripts/test_new_matrix_user.py @@ -254,10 +252,7 @@ disallow_untyped_defs = True [mypy-tests.storage.test_user_directory] disallow_untyped_defs = True -[mypy-tests.rest.admin.*] -disallow_untyped_defs = True - -[mypy-tests.rest.client.*] +[mypy-tests.rest.*] disallow_untyped_defs = True [mypy-tests.federation.transport.test_client] diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 4672a68596..978c252f84 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -13,19 +13,24 @@ # limitations under the License. 
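The mypy.ini change earlier in this commit (collapsing the per-directory sections into a single mypy-tests.rest.* section with disallow_untyped_defs = True) is what gives the mechanical `-> None` annotations below their value: under that flag mypy rejects unannotated functions outright, and by default it also skips type-checking the bodies of unannotated functions entirely. A toy illustration, not from the Synapse tree:

from typing import List

def build_names():  # flagged under disallow_untyped_defs; body not type-checked by default
    names: List[str] = []
    names.append(123)  # this mistake would go unnoticed
    return names

def build_names_typed() -> List[str]:  # annotated, so mypy checks the body
    names: List[str] = []
    names.append(123)  # error: "append" expects "str", got "int"
    return names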
import urllib.parse from io import BytesIO, StringIO +from typing import Any, Dict, Optional, Union from unittest.mock import Mock import signedjson.key from canonicaljson import encode_canonical_json -from nacl.signing import SigningKey from signedjson.sign import sign_json +from signedjson.types import SigningKey -from twisted.web.resource import NoResource +from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import NoResource, Resource from synapse.crypto.keyring import PerspectivesKeyFetcher from synapse.http.site import SynapseRequest from synapse.rest.key.v2 import KeyApiV2Resource +from synapse.server import HomeServer from synapse.storage.keys import FetchKeyResult +from synapse.types import JsonDict +from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree from synapse.util.stringutils import random_string @@ -35,11 +40,11 @@ from tests.utils import default_config class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock() return self.setup_test_homeserver(federation_http_client=self.http_client) - def create_test_resource(self): + def create_test_resource(self) -> Resource: return create_resource_tree( {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource() ) @@ -51,7 +56,12 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase): Tell the mock http client to expect an outgoing GET request for the given key """ - async def get_json(destination, path, ignore_backoff=False, **kwargs): + async def get_json( + destination: str, + path: str, + ignore_backoff: bool = False, + **kwargs: Any, + ) -> Union[JsonDict, list]: self.assertTrue(ignore_backoff) self.assertEqual(destination, server_name) key_id = "%s:%s" % (signing_key.alg, signing_key.version) @@ -84,7 +94,8 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase): Checks that the response is a 200 and returns the decoded json body. """ channel = FakeChannel(self.site, self.reactor) - req = SynapseRequest(channel, self.site) + # channel is a `FakeChannel` but `HTTPChannel` is expected + req = SynapseRequest(channel, self.site) # type: ignore[arg-type] req.content = BytesIO(b"") req.requestReceived( b"GET", @@ -97,7 +108,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase): resp = channel.json_body return resp - def test_get_key(self): + def test_get_key(self) -> None: """Fetch a remote key""" SERVER_NAME = "remote.server" testkey = signedjson.key.generate_signing_key("ver1") @@ -114,7 +125,7 @@ class RemoteKeyResourceTestCase(BaseRemoteKeyResourceTestCase): self.assertIn(SERVER_NAME, keys[0]["signatures"]) self.assertIn(self.hs.hostname, keys[0]["signatures"]) - def test_get_own_key(self): + def test_get_own_key(self) -> None: """Fetch our own key""" testkey = signedjson.key.generate_signing_key("ver1") self.expect_outgoing_key_request(self.hs.hostname, testkey) @@ -141,7 +152,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): endpoint, to check that the two implementations are compatible. 
""" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # replace the signing key with our own @@ -152,7 +163,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): return config - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # make a second homeserver, configured to use the first one as a key notary self.http_client2 = Mock() config = default_config(name="keyclient") @@ -175,7 +186,9 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): # wire up outbound POST /key/v2/query requests from hs2 so that they # will be forwarded to hs1 - async def post_json(destination, path, data): + async def post_json( + destination: str, path: str, data: Optional[JsonDict] = None + ) -> Union[JsonDict, list]: self.assertEqual(destination, self.hs.hostname) self.assertEqual( path, @@ -183,7 +196,8 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): ) channel = FakeChannel(self.site, self.reactor) - req = SynapseRequest(channel, self.site) + # channel is a `FakeChannel` but `HTTPChannel` is expected + req = SynapseRequest(channel, self.site) # type: ignore[arg-type] req.content = BytesIO(encode_canonical_json(data)) req.requestReceived( @@ -198,7 +212,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): self.http_client2.post_json.side_effect = post_json - def test_get_key(self): + def test_get_key(self) -> None: """Fetch a key belonging to a random server""" # make up a key to be fetched. testkey = signedjson.key.generate_signing_key("abc") @@ -218,7 +232,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): signedjson.key.encode_verify_key_base64(testkey.verify_key), ) - def test_get_notary_key(self): + def test_get_notary_key(self) -> None: """Fetch a key belonging to the notary server""" # make up a key to be fetched. 
We randomise the keyid to try to get it to # appear before the key server signing key sometimes (otherwise we bail out @@ -240,7 +254,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): signedjson.key.encode_verify_key_base64(testkey.verify_key), ) - def test_get_notary_keyserver_key(self): + def test_get_notary_keyserver_key(self) -> None: """Fetch the notary's keyserver key""" # we expect hs1 to make a regular key request to itself self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key) diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py index f761e23f1b..c73179151a 100644 --- a/tests/rest/media/v1/test_base.py +++ b/tests/rest/media/v1/test_base.py @@ -28,11 +28,11 @@ class GetFileNameFromHeadersTests(unittest.TestCase): b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar", } - def tests(self): + def tests(self) -> None: for hdr, expected in self.TEST_CASES.items(): res = get_filename_from_headers({b"Content-Disposition": [hdr]}) self.assertEqual( res, expected, - "expected output for %s to be %s but was %s" % (hdr, expected, res), + f"expected output for {hdr!r} to be {expected} but was {res}", ) diff --git a/tests/rest/media/v1/test_filepath.py b/tests/rest/media/v1/test_filepath.py index 913bc530aa..43e6f0f70a 100644 --- a/tests/rest/media/v1/test_filepath.py +++ b/tests/rest/media/v1/test_filepath.py @@ -21,12 +21,12 @@ from tests import unittest class MediaFilePathsTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: super().setUp() self.filepaths = MediaFilePaths("/media_store") - def test_local_media_filepath(self): + def test_local_media_filepath(self) -> None: """Test local media paths""" self.assertEqual( self.filepaths.local_media_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"), @@ -37,7 +37,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/local_content/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_local_media_thumbnail(self): + def test_local_media_thumbnail(self) -> None: """Test local media thumbnail paths""" self.assertEqual( self.filepaths.local_media_thumbnail_rel( @@ -52,14 +52,14 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale", ) - def test_local_media_thumbnail_dir(self): + def test_local_media_thumbnail_dir(self) -> None: """Test local media thumbnail directory paths""" self.assertEqual( self.filepaths.local_media_thumbnail_dir("GerZNDnDZVjsOtardLuwfIBg"), "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_remote_media_filepath(self): + def test_remote_media_filepath(self) -> None: """Test remote media paths""" self.assertEqual( self.filepaths.remote_media_filepath_rel( @@ -74,7 +74,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/remote_content/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_remote_media_thumbnail(self): + def test_remote_media_thumbnail(self) -> None: """Test remote media thumbnail paths""" self.assertEqual( self.filepaths.remote_media_thumbnail_rel( @@ -99,7 +99,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale", ) - def test_remote_media_thumbnail_legacy(self): + def test_remote_media_thumbnail_legacy(self) -> None: """Test old-style remote media thumbnail paths""" self.assertEqual( self.filepaths.remote_media_thumbnail_rel_legacy( @@ -108,7 +108,7 @@ class MediaFilePathsTestCase(unittest.TestCase): 
"remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg", ) - def test_remote_media_thumbnail_dir(self): + def test_remote_media_thumbnail_dir(self) -> None: """Test remote media thumbnail directory paths""" self.assertEqual( self.filepaths.remote_media_thumbnail_dir( @@ -117,7 +117,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_url_cache_filepath(self): + def test_url_cache_filepath(self) -> None: """Test URL cache paths""" self.assertEqual( self.filepaths.url_cache_filepath_rel("2020-01-02_GerZNDnDZVjsOtar"), @@ -128,7 +128,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache/2020-01-02/GerZNDnDZVjsOtar", ) - def test_url_cache_filepath_legacy(self): + def test_url_cache_filepath_legacy(self) -> None: """Test old-style URL cache paths""" self.assertEqual( self.filepaths.url_cache_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"), @@ -139,7 +139,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_url_cache_filepath_dirs_to_delete(self): + def test_url_cache_filepath_dirs_to_delete(self) -> None: """Test URL cache cleanup paths""" self.assertEqual( self.filepaths.url_cache_filepath_dirs_to_delete( @@ -148,7 +148,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ["/media_store/url_cache/2020-01-02"], ) - def test_url_cache_filepath_dirs_to_delete_legacy(self): + def test_url_cache_filepath_dirs_to_delete_legacy(self) -> None: """Test old-style URL cache cleanup paths""" self.assertEqual( self.filepaths.url_cache_filepath_dirs_to_delete( @@ -160,7 +160,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_url_cache_thumbnail(self): + def test_url_cache_thumbnail(self) -> None: """Test URL cache thumbnail paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_rel( @@ -175,7 +175,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale", ) - def test_url_cache_thumbnail_legacy(self): + def test_url_cache_thumbnail_legacy(self) -> None: """Test old-style URL cache thumbnail paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_rel( @@ -190,7 +190,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale", ) - def test_url_cache_thumbnail_directory(self): + def test_url_cache_thumbnail_directory(self) -> None: """Test URL cache thumbnail directory paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_directory_rel( @@ -203,7 +203,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar", ) - def test_url_cache_thumbnail_directory_legacy(self): + def test_url_cache_thumbnail_directory_legacy(self) -> None: """Test old-style URL cache thumbnail directory paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_directory_rel( @@ -216,7 +216,7 @@ class MediaFilePathsTestCase(unittest.TestCase): "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg", ) - def test_url_cache_thumbnail_dirs_to_delete(self): + def test_url_cache_thumbnail_dirs_to_delete(self) -> None: """Test URL cache thumbnail cleanup paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_dirs_to_delete( @@ -228,7 +228,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_url_cache_thumbnail_dirs_to_delete_legacy(self): + def 
test_url_cache_thumbnail_dirs_to_delete_legacy(self) -> None: """Test old-style URL cache thumbnail cleanup paths""" self.assertEqual( self.filepaths.url_cache_thumbnail_dirs_to_delete( @@ -241,7 +241,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_server_name_validation(self): + def test_server_name_validation(self) -> None: """Test validation of server names""" self._test_path_validation( [ @@ -274,7 +274,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_file_id_validation(self): + def test_file_id_validation(self) -> None: """Test validation of local, remote and legacy URL cache file / media IDs""" # File / media IDs get split into three parts to form paths, consisting of the # first two characters, next two characters and rest of the ID. @@ -357,7 +357,7 @@ class MediaFilePathsTestCase(unittest.TestCase): invalid_values=invalid_file_ids, ) - def test_url_cache_media_id_validation(self): + def test_url_cache_media_id_validation(self) -> None: """Test validation of URL cache media IDs""" self._test_path_validation( [ @@ -387,7 +387,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_content_type_validation(self): + def test_content_type_validation(self) -> None: """Test validation of thumbnail content types""" self._test_path_validation( [ @@ -410,7 +410,7 @@ class MediaFilePathsTestCase(unittest.TestCase): ], ) - def test_thumbnail_method_validation(self): + def test_thumbnail_method_validation(self) -> None: """Test validation of thumbnail methods""" self._test_path_validation( [ @@ -440,7 +440,7 @@ class MediaFilePathsTestCase(unittest.TestCase): parameter: str, valid_values: Iterable[str], invalid_values: Iterable[str], - ): + ) -> None: """Test that the specified methods validate the named parameter as expected Args: diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py index a4b57e3d1f..3fb37a2a59 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -32,7 +32,7 @@ class SummarizeTestCase(unittest.TestCase): if not lxml: skip = "url preview feature requires lxml" - def test_long_summarize(self): + def test_long_summarize(self) -> None: example_paras = [ """Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami: Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in @@ -90,7 +90,7 @@ class SummarizeTestCase(unittest.TestCase): " Tromsøya had a population of 36,088. 
Substantial parts of the urban…", ) - def test_short_summarize(self): + def test_short_summarize(self) -> None: example_paras = [ "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:" " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in" @@ -117,7 +117,7 @@ class SummarizeTestCase(unittest.TestCase): " most of the year.", ) - def test_small_then_large_summarize(self): + def test_small_then_large_summarize(self) -> None: example_paras = [ "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:" " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in" @@ -150,7 +150,7 @@ class CalcOgTestCase(unittest.TestCase): if not lxml: skip = "url preview feature requires lxml" - def test_simple(self): + def test_simple(self) -> None: html = b""" Foo @@ -165,7 +165,7 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) - def test_comment(self): + def test_comment(self) -> None: html = b""" Foo @@ -181,7 +181,7 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) - def test_comment2(self): + def test_comment2(self) -> None: html = b""" Foo @@ -206,7 +206,7 @@ class CalcOgTestCase(unittest.TestCase): }, ) - def test_script(self): + def test_script(self) -> None: html = b""" Foo @@ -222,7 +222,7 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) - def test_missing_title(self): + def test_missing_title(self) -> None: html = b""" @@ -236,7 +236,7 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) - def test_h1_as_title(self): + def test_h1_as_title(self) -> None: html = b""" @@ -251,7 +251,7 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) - def test_missing_title_and_broken_h1(self): + def test_missing_title_and_broken_h1(self) -> None: html = b""" @@ -266,19 +266,19 @@ class CalcOgTestCase(unittest.TestCase): self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) - def test_empty(self): + def test_empty(self) -> None: """Test a body with no data in it.""" html = b"" tree = decode_body(html, "http://example.com/test.html") self.assertIsNone(tree) - def test_no_tree(self): + def test_no_tree(self) -> None: """A valid body with no tree in it.""" html = b"\x00" tree = decode_body(html, "http://example.com/test.html") self.assertIsNone(tree) - def test_xml(self): + def test_xml(self) -> None: """Test decoding XML and ensure it works properly.""" # Note that the strip() call is important to ensure the xml tag starts # at the initial byte. 
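The CalcOgTestCase cases above pin down the page-preview extraction rules: og:* meta tags win, a <title> (or failing that an <h1>) becomes og:title, body text becomes og:description, and comments and scripts are ignored. A rough standalone sketch of that kind of extraction using only the stdlib; Synapse's real parser is built on lxml, so this is illustrative only:

from html.parser import HTMLParser
from typing import Dict, List, Optional, Tuple

class OpenGraphParser(HTMLParser):
    def __init__(self) -> None:
        super().__init__()
        self.og: Dict[str, Optional[str]] = {}
        self._in_title = False

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
        attrd = dict(attrs)
        prop = attrd.get("property") or ""
        if tag == "meta" and prop.startswith("og:"):
            self.og[prop] = attrd.get("content")
        elif tag == "title":
            self._in_title = True

    def handle_endtag(self, tag: str) -> None:
        if tag == "title":
            self._in_title = False

    def handle_data(self, data: str) -> None:
        # Fall back to the <title> text if no og:title meta tag was seen.
        if self._in_title and "og:title" not in self.og:
            self.og["og:title"] = data

parser = OpenGraphParser()
parser.feed("<html><head><title>Foo</title></head><body>Some text.</body></html>")
print(parser.og)  # {'og:title': 'Foo'}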
@@ -293,7 +293,7 @@ class CalcOgTestCase(unittest.TestCase): og = parse_html_to_open_graph(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) - def test_invalid_encoding(self): + def test_invalid_encoding(self) -> None: """An invalid character encoding should be ignored and treated as UTF-8, if possible.""" html = b""" @@ -307,7 +307,7 @@ class CalcOgTestCase(unittest.TestCase): og = parse_html_to_open_graph(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) - def test_invalid_encoding2(self): + def test_invalid_encoding2(self) -> None: """A body which doesn't match the sent character encoding.""" # Note that this contains an invalid UTF-8 sequence in the title. html = b""" @@ -322,7 +322,7 @@ class CalcOgTestCase(unittest.TestCase): og = parse_html_to_open_graph(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) - def test_windows_1252(self): + def test_windows_1252(self) -> None: """A body which uses cp1252, but doesn't declare that.""" html = b""" @@ -338,7 +338,7 @@ class CalcOgTestCase(unittest.TestCase): class MediaEncodingTestCase(unittest.TestCase): - def test_meta_charset(self): + def test_meta_charset(self) -> None: """A character encoding is found via the meta tag.""" encodings = _get_html_media_encodings( b""" @@ -363,7 +363,7 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"]) - def test_meta_charset_underscores(self): + def test_meta_charset_underscores(self) -> None: """A character encoding contains underscore.""" encodings = _get_html_media_encodings( b""" @@ -376,7 +376,7 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(list(encodings), ["shift_jis", "utf-8", "cp1252"]) - def test_xml_encoding(self): + def test_xml_encoding(self) -> None: """A character encoding is found via the meta tag.""" encodings = _get_html_media_encodings( b""" @@ -388,7 +388,7 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"]) - def test_meta_xml_encoding(self): + def test_meta_xml_encoding(self) -> None: """Meta tags take precedence over XML encoding.""" encodings = _get_html_media_encodings( b""" @@ -402,7 +402,7 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(list(encodings), ["utf-16", "ascii", "utf-8", "cp1252"]) - def test_content_type(self): + def test_content_type(self) -> None: """A character encoding is found via the Content-Type header.""" # Test a few variations of the header. 
headers = ( @@ -417,12 +417,12 @@ class MediaEncodingTestCase(unittest.TestCase): encodings = _get_html_media_encodings(b"", header) self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"]) - def test_fallback(self): + def test_fallback(self) -> None: """A character encoding cannot be found in the body or header.""" encodings = _get_html_media_encodings(b"", "text/html") self.assertEqual(list(encodings), ["utf-8", "cp1252"]) - def test_duplicates(self): + def test_duplicates(self) -> None: """Ensure each encoding is only attempted once.""" encodings = _get_html_media_encodings( b""" @@ -436,7 +436,7 @@ class MediaEncodingTestCase(unittest.TestCase): ) self.assertEqual(list(encodings), ["utf-8", "cp1252"]) - def test_unknown_invalid(self): + def test_unknown_invalid(self) -> None: """A character encoding should be ignored if it is unknown or invalid.""" encodings = _get_html_media_encodings( b""" @@ -451,7 +451,7 @@ class MediaEncodingTestCase(unittest.TestCase): class RebaseUrlTestCase(unittest.TestCase): - def test_relative(self): + def test_relative(self) -> None: """Relative URLs should be resolved based on the context of the base URL.""" self.assertEqual( rebase_url("subpage", "https://example.com/foo/"), @@ -466,14 +466,14 @@ class RebaseUrlTestCase(unittest.TestCase): "https://example.com/bar", ) - def test_absolute(self): + def test_absolute(self) -> None: """Absolute URLs should not be modified.""" self.assertEqual( rebase_url("https://alice.com/a/", "https://example.com/foo/"), "https://alice.com/a/", ) - def test_data(self): + def test_data(self) -> None: """Data URLs should not be modified.""" self.assertEqual( rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"), diff --git a/tests/rest/media/v1/test_oembed.py b/tests/rest/media/v1/test_oembed.py index 048d0ca44a..f38d7225f8 100644 --- a/tests/rest/media/v1/test_oembed.py +++ b/tests/rest/media/v1/test_oembed.py @@ -16,7 +16,7 @@ import json from twisted.test.proto_helpers import MemoryReactor -from synapse.rest.media.v1.oembed import OEmbedProvider +from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -25,15 +25,15 @@ from tests.unittest import HomeserverTestCase class OEmbedTests(HomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): - self.oembed = OEmbedProvider(homeserver) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.oembed = OEmbedProvider(hs) - def parse_response(self, response: JsonDict): + def parse_response(self, response: JsonDict) -> OEmbedResult: return self.oembed.parse_oembed_response( "https://test", json.dumps(response).encode("utf-8") ) - def test_version(self): + def test_version(self) -> None: """Accept versions that are similar to 1.0 as a string or int (or missing).""" for version in ("1.0", 1.0, 1): result = self.parse_response({"version": version, "type": "link"}) diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py index 01d48c3860..da325955f8 100644 --- a/tests/rest/test_health.py +++ b/tests/rest/test_health.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
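The MediaEncodingTestCase block above fixes the order in which candidate character encodings are tried: a meta charset wins over an XML declaration, which wins over the Content-Type header; utf-8 and cp1252 are always appended as fallbacks; and unknown or duplicate encodings are dropped. A simplified sketch of that candidate-generation logic (not Synapse's implementation):

import codecs
from typing import Iterable, Optional

def candidate_encodings(
    meta_charset: Optional[str],
    xml_encoding: Optional[str],
    header_charset: Optional[str],
) -> Iterable[str]:
    seen = set()
    for enc in (meta_charset, xml_encoding, header_charset, "utf-8", "cp1252"):
        if enc is None:
            continue
        try:
            name = codecs.lookup(enc).name  # normalises and validates the codec
        except LookupError:
            continue  # unknown/invalid encodings are ignored
        if name not in seen:
            seen.add(name)
            yield enc

print(list(candidate_encodings("ascii", None, "ascii")))  # ['ascii', 'utf-8', 'cp1252']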
- +from http import HTTPStatus from synapse.rest.health import HealthResource @@ -19,12 +19,12 @@ from tests import unittest class HealthCheckTests(unittest.HomeserverTestCase): - def create_test_resource(self): + def create_test_resource(self) -> HealthResource: # replace the JsonResource with a HealthResource. return HealthResource() - def test_health(self): + def test_health(self) -> None: channel = self.make_request("GET", "/health", shorthand=False) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(channel.result["body"], b"OK") diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 118aa93a32..11f78f52b8 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus + from twisted.web.resource import Resource from synapse.rest.well_known import well_known_resource @@ -19,7 +21,7 @@ from tests import unittest class WellKnownTests(unittest.HomeserverTestCase): - def create_test_resource(self): + def create_test_resource(self) -> Resource: # replace the JsonResource with a Resource wrapping the WellKnownResource res = Resource() res.putChild(b".well-known", well_known_resource(self.hs)) @@ -31,12 +33,12 @@ class WellKnownTests(unittest.HomeserverTestCase): "default_identity_server": "https://testis", } ) - def test_client_well_known(self): + def test_client_well_known(self) -> None: channel = self.make_request( "GET", "/.well-known/matrix/client", shorthand=False ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual( channel.json_body, { @@ -50,27 +52,27 @@ class WellKnownTests(unittest.HomeserverTestCase): "public_baseurl": None, } ) - def test_client_well_known_no_public_baseurl(self): + def test_client_well_known_no_public_baseurl(self) -> None: channel = self.make_request( "GET", "/.well-known/matrix/client", shorthand=False ) - self.assertEqual(channel.code, 404) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) @unittest.override_config({"serve_server_wellknown": True}) - def test_server_well_known(self): + def test_server_well_known(self) -> None: channel = self.make_request( "GET", "/.well-known/matrix/server", shorthand=False ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual( channel.json_body, {"m.server": "test:443"}, ) - def test_server_well_known_disabled(self): + def test_server_well_known_disabled(self) -> None: channel = self.make_request( "GET", "/.well-known/matrix/server", shorthand=False ) - self.assertEqual(channel.code, 404) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) -- cgit 1.5.1 From 9297d040a72b70c7cc0ec15319afdd99b01ba885 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 3 Mar 2022 17:14:09 +0000 Subject: Detox, part 2 of N (#12152) I've argued in #11537 that poetry and tox don't cooperate well at the moment. (See also #12119.) Therefore I'm pruning away bits of tox to make the transition to poetry easier. This change removes the commands for coverage. We don't use coverage in anger at the moment. It shouldn't be too hard to add coverage as a dev-dependency and reintroduce this if we really want it. 
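For anyone who still wants coverage locally after this pruning: the deleted environments were thin wrappers around coverage.py, so the equivalent steps can be run directly. A hypothetical sketch using coverage's Python API, assuming `coverage` is installed as a dev dependency and `coverage run` has already produced data files:

import coverage

cov = coverage.Coverage()
cov.combine()      # what the removed `combine` env did: merge .coverage.* data files
cov.report()       # ...followed by a console report
cov.html_report()  # what the removed `cov-html` env did
cov.erase()        # what the removed `cov-erase` env did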
---
 changelog.d/12152.misc |  1 +
 tox.ini                | 26 --------------------------
 2 files changed, 1 insertion(+), 26 deletions(-)
 create mode 100644 changelog.d/12152.misc

diff --git a/changelog.d/12152.misc b/changelog.d/12152.misc
new file mode 100644
index 0000000000..b9877eaccb
--- /dev/null
+++ b/changelog.d/12152.misc
@@ -0,0 +1 @@
+Prune unused jobs from `tox` config.
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index f4829200cc..04d282a705 100644
--- a/tox.ini
+++ b/tox.ini
@@ -158,32 +158,6 @@ commands =
 extras = lint
 commands = isort -c --df {[base]lint_targets}

-[testenv:combine]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-    pip>=10
-commands=
-    coverage combine
-    coverage report
-
-[testenv:cov-erase]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-commands=
-    coverage erase
-
-[testenv:cov-html]
-skip_install = true
-usedevelop = false
-deps =
-    coverage
-commands=
-    coverage html
-
 [testenv:mypy]
 deps =
     {[base]deps}
-- cgit 1.5.1

From fb0ffa96766a4b6f298f53af2d212e4c4d09d9e9 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 3 Mar 2022 18:14:09 +0000
Subject: Rename various ApplicationServices interested methods (#11915)

---
 changelog.d/11915.misc              |   1 +
 synapse/appservice/__init__.py      | 133 ++++++++++++++++++++++++------------
 synapse/handlers/appservice.py      |   4 +-
 synapse/handlers/directory.py       |   6 +-
 synapse/handlers/receipts.py        |   2 +-
 synapse/handlers/typing.py          |   4 +-
 tests/appservice/test_appservice.py |  45 +++++++++---
 tests/handlers/test_appservice.py   |  56 +++++++++++----
 8 files changed, 175 insertions(+), 76 deletions(-)
 create mode 100644 changelog.d/11915.misc

diff --git a/changelog.d/11915.misc b/changelog.d/11915.misc
new file mode 100644
index 0000000000..e3cef1511e
--- /dev/null
+++ b/changelog.d/11915.misc
@@ -0,0 +1 @@
+Simplify the `ApplicationService` class' set of public methods related to interest checking.
\ No newline at end of file
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 4d3f8e4923..07ec95f1d6 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -175,27 +175,14 @@ class ApplicationService:
                 return namespace.exclusive
         return False

-    async def _matches_user(self, event: EventBase, store: "DataStore") -> bool:
-        if self.is_interested_in_user(event.sender):
-            return True
-
-        # also check m.room.member state key
-        if event.type == EventTypes.Member and self.is_interested_in_user(
-            event.state_key
-        ):
-            return True
-
-        does_match = await self.matches_user_in_member_list(event.room_id, store)
-        return does_match
-
     @cached(num_args=1, cache_context=True)
-    async def matches_user_in_member_list(
+    async def _matches_user_in_member_list(
         self,
         room_id: str,
         store: "DataStore",
         cache_context: _CacheContext,
     ) -> bool:
-        """Check if this service is interested a room based upon it's membership
+        """Check if this service is interested in a room based upon its membership

         Args:
             room_id: The room to check.
@@ -214,47 +201,110 @@ class ApplicationService:
                 return True
         return False

-    def _matches_room_id(self, event: EventBase) -> bool:
-        if hasattr(event, "room_id"):
-            return self.is_interested_in_room(event.room_id)
-        return False
+    def is_interested_in_user(
+        self,
+        user_id: str,
+    ) -> bool:
+        """
+        Returns whether the application is interested in a given user ID.
+ + The appservice is considered to be interested in a user if either: the + user ID is in the appservice's user namespace, or if the user is the + appservice's configured sender_localpart. + + Args: + user_id: The ID of the user to check. + + Returns: + True if the application service is interested in the user, False if not. + """ + return ( + # User is the appservice's sender_localpart user + user_id == self.sender + # User is in the appservice's user namespace + or self.is_user_in_namespace(user_id) + ) + + @cached(num_args=1, cache_context=True) + async def is_interested_in_room( + self, + room_id: str, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: + """ + Returns whether the application service is interested in a given room ID. + + The appservice is considered to be interested in the room if either: the ID or one + of the aliases of the room is in the appservice's room ID or alias namespace + respectively, or if one of the members of the room fall into the appservice's user + namespace. - async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool: - alias_list = await store.get_aliases_for_room(event.room_id) + Args: + room_id: The ID of the room to check. + store: The homeserver's datastore class. + + Returns: + True if the application service is interested in the room, False if not. + """ + # Check if we have interest in this room ID + if self.is_room_id_in_namespace(room_id): + return True + + # likewise with the room's aliases (if it has any) + alias_list = await store.get_aliases_for_room(room_id) for alias in alias_list: - if self.is_interested_in_alias(alias): + if self.is_room_alias_in_namespace(alias): return True - return False + # And finally, perform an expensive check on whether any of the + # users in the room match the appservice's user namespace + return await self._matches_user_in_member_list( + room_id, store, on_invalidate=cache_context.invalidate + ) - async def is_interested(self, event: EventBase, store: "DataStore") -> bool: + @cached(num_args=1, cache_context=True) + async def is_interested_in_event( + self, + event_id: str, + event: EventBase, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: """Check if this service is interested in this event. Args: + event_id: The ID of the event to check. This is purely used for simplifying the + caching of calls to this method. event: The event to check. store: The datastore to query. Returns: - True if this service would like to know about this event. + True if this service would like to know about this event, otherwise False. 
""" - # Do cheap checks first - if self._matches_room_id(event): + # Check if we're interested in this event's sender by namespace (or if they're the + # sender_localpart user) + if self.is_interested_in_user(event.sender): return True - # This will check the namespaces first before - # checking the store, so should be run before _matches_aliases - if await self._matches_user(event, store): + # additionally, if this is a membership event, perform the same checks on + # the user it references + if event.type == EventTypes.Member and self.is_interested_in_user( + event.state_key + ): return True - # This will check the store, so should be run last - if await self._matches_aliases(event, store): + # This will check the datastore, so should be run last + if await self.is_interested_in_room( + event.room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - @cached(num_args=1) + @cached(num_args=1, cache_context=True) async def is_interested_in_presence( - self, user_id: UserID, store: "DataStore" + self, user_id: UserID, store: "DataStore", cache_context: _CacheContext ) -> bool: """Check if this service is interested a user's presence @@ -272,20 +322,19 @@ class ApplicationService: # Then find out if the appservice is interested in any of those rooms for room_id in room_ids: - if await self.matches_user_in_member_list(room_id, store): + if await self.is_interested_in_room( + room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - def is_interested_in_user(self, user_id: str) -> bool: - return ( - bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - or user_id == self.sender - ) + def is_user_in_namespace(self, user_id: str) -> bool: + return bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - def is_interested_in_alias(self, alias: str) -> bool: + def is_room_alias_in_namespace(self, alias: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias)) - def is_interested_in_room(self, room_id: str) -> bool: + def is_room_id_in_namespace(self, room_id: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id)) def is_exclusive_user(self, user_id: str) -> bool: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index e6461cc3c9..bd913e524e 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -571,7 +571,7 @@ class ApplicationServicesHandler: room_alias_str = room_alias.to_string() services = self.store.get_app_services() alias_query_services = [ - s for s in services if (s.is_interested_in_alias(room_alias_str)) + s for s in services if (s.is_room_alias_in_namespace(room_alias_str)) ] for alias_service in alias_query_services: is_known_alias = await self.appservice_api.query_alias( @@ -660,7 +660,7 @@ class ApplicationServicesHandler: # inside of a list comprehension anymore. 
interested_list = [] for s in services: - if await s.is_interested(event, self.store): + if await s.is_interested_in_event(event.event_id, event, self.store): interested_list.append(s) return interested_list diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index b7064c6624..33d827a45b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -119,7 +119,7 @@ class DirectoryHandler: service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias_str): + if not service.is_room_alias_in_namespace(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -221,7 +221,7 @@ class DirectoryHandler: async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias ) -> None: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_room_alias_in_namespace(room_alias.to_string()): raise SynapseError( 400, "This application service has not reserved this kind of alias", @@ -376,7 +376,7 @@ class DirectoryHandler: # non-exclusive locks on the alias (or there are no interested services) services = self.store.get_app_services() interested_services = [ - s for s in services if s.is_interested_in_alias(alias.to_string()) + s for s in services if s.is_room_alias_in_namespace(alias.to_string()) ] for service in interested_services: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index b4132c353a..6250bb3bdf 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -269,7 +269,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): # Then filter down to rooms that the AS can read events = [] for room_id, event in rooms_to_events.items(): - if not await service.matches_user_in_member_list(room_id, self.store): + if not await service.is_interested_in_room(room_id, self.store): continue events.append(event) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 843c68eb0f..3b89126528 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -486,9 +486,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): if handler._room_serials[room_id] <= from_key: continue - if not await service.matches_user_in_member_list( - room_id, self._main_store - ): + if not await service.is_interested_in_room(room_id, self._main_store): continue events.append(self._make_event_for(room_id)) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 9bd6275e92..edc584d0cf 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -36,7 +36,10 @@ class ApplicationServiceTestCase(unittest.TestCase): hostname="matrix.org", # only used by get_groups_for_user ) self.event = Mock( - type="m.something", room_id="!foo:bar", sender="@someone:somewhere" + event_id="$abc:xyz", + type="m.something", + room_id="!foo:bar", + sender="@someone:somewhere", ) self.store = Mock() @@ -50,7 +53,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -62,7 +67,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) 
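Because `is_interested_in_event` is now a coroutine, the handler collects interested services with an explicit loop rather than a list comprehension, as the comment above notes. If the checks were wanted concurrently instead of sequentially, a gather-based variant might look like the sketch below (illustrative only; this is not how Synapse schedules the checks):

```python
import asyncio


async def filter_interested(services, event, store):
    # Kick off every service's interest check at once, then keep the
    # services whose check came back True.
    flags = await asyncio.gather(
        *(s.is_interested_in_event(event.event_id, event, store) for s in services)
    )
    return [s for s, interested in zip(services, flags) if interested]
```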
) ) ) @@ -76,7 +83,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -90,7 +99,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -104,7 +115,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -121,7 +134,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -174,7 +189,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -191,7 +208,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -207,7 +226,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -225,7 +246,9 @@ class ApplicationServiceTestCase(unittest.TestCase): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(event=self.event, store=self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 072e6bbcdd..cead9f90df 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -59,11 +59,11 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.event_source = hs.get_event_sources() def test_notify_interested_services(self): - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [ - self._mkservice(is_interested=False), + self._mkservice(is_interested_in_event=False), interested_service, - self._mkservice(is_interested=False), + self._mkservice(is_interested_in_event=False), ] self.mock_as_api.query_user.return_value = make_awaitable(True) @@ -85,7 +85,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_query_user_exists_unknown_user(self): user_id = "@someone:anywhere" - services = [self._mkservice(is_interested=True)] + services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services self.mock_store.get_user_by_id.return_value = make_awaitable(None) @@ -102,7 +102,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_query_user_exists_known_user(self): user_id = "@someone:anywhere" - services = [self._mkservice(is_interested=True)] 
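These tests are written in Twisted's old generator style, which is why every call to the now-async `is_interested_in_event` has to be wrapped in `defer.ensureDeferred` before being yielded. The pattern in isolation:

```python
from twisted.internet import defer
from twisted.trial import unittest


class EnsureDeferredExample(unittest.TestCase):
    @defer.inlineCallbacks
    def test_wrapping_a_coroutine(self):
        async def is_interested() -> bool:
            return True

        # ensureDeferred turns the coroutine into a Deferred that the
        # inlineCallbacks generator can yield on.
        result = yield defer.ensureDeferred(is_interested())
        self.assertTrue(result)
```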
+ services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services self.mock_store.get_user_by_id.return_value = make_awaitable({"name": user_id}) @@ -127,11 +127,11 @@ class AppServiceHandlerTestCase(unittest.TestCase): room_id = "!alpha:bet" servers = ["aperture"] - interested_service = self._mkservice_alias(is_interested_in_alias=True) + interested_service = self._mkservice_alias(is_room_alias_in_namespace=True) services = [ - self._mkservice_alias(is_interested_in_alias=False), + self._mkservice_alias(is_room_alias_in_namespace=False), interested_service, - self._mkservice_alias(is_interested_in_alias=False), + self._mkservice_alias(is_room_alias_in_namespace=False), ] self.mock_as_api.query_alias.return_value = make_awaitable(True) @@ -275,7 +275,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): to be pushed out to interested appservices, and that the stream ID is updated accordingly. """ - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [interested_service] self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( @@ -304,7 +304,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): Test sending out of order ephemeral events to the appservice handler are ignored. """ - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [interested_service] self.mock_store.get_app_services.return_value = services @@ -325,17 +325,45 @@ class AppServiceHandlerTestCase(unittest.TestCase): interested_service, ephemeral=[] ) - def _mkservice(self, is_interested, protocols=None): + def _mkservice( + self, is_interested_in_event: bool, protocols: Optional[Iterable] = None + ) -> Mock: + """ + Create a new mock representing an ApplicationService. + + Args: + is_interested_in_event: Whether this application service will be considered + interested in all events. + protocols: The third-party protocols that this application service claims to + support. + + Returns: + A mock representing the ApplicationService. + """ service = Mock() - service.is_interested.return_value = make_awaitable(is_interested) + service.is_interested_in_event.return_value = make_awaitable( + is_interested_in_event + ) service.token = "mock_service_token" service.url = "mock_service_url" service.protocols = protocols return service - def _mkservice_alias(self, is_interested_in_alias): + def _mkservice_alias(self, is_room_alias_in_namespace: bool) -> Mock: + """ + Create a new mock representing an ApplicationService that is or is not interested + any given room aliase. + + Args: + is_room_alias_in_namespace: If true, the application service will be interested + in all room aliases that are queried against it. If false, the application + service will not be interested in any room aliases. + + Returns: + A mock representing the ApplicationService. 
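`make_awaitable` here is a small helper from Synapse's test utilities that wraps a plain value so it can be returned from a mocked async method. A rough single-use equivalent (the real helper is a little fancier so that the same value can be awaited more than once):

```python
from typing import Any, Awaitable
from unittest.mock import Mock


def make_awaitable(result: Any) -> Awaitable[Any]:
    async def _inner() -> Any:
        return result

    # Note: a coroutine can only be awaited once, which is fine as long as
    # the mocked method is called a single time per test.
    return _inner()


service = Mock()
service.is_interested_in_event.return_value = make_awaitable(True)
```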
+ """ service = Mock() - service.is_interested_in_alias.return_value = is_interested_in_alias + service.is_room_alias_in_namespace.return_value = is_room_alias_in_namespace service.token = "mock_service_token" service.url = "mock_service_url" return service -- cgit 1.5.1 From 8533c8b03d8916e3805c7d0e0020226017680147 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Mar 2022 19:58:08 +0000 Subject: Avoid generating state groups for local out-of-band leaves (#12154) If we locally generate a rejection for an invite received over federation, it is stored as an outlier (because we probably don't have the state for the room). However, currently we still generate a state group for it (even though the state in that state group will be nonsense). By setting the `outlier` param on `create_event`, we avoid the nonsensical state. --- changelog.d/12154.misc | 1 + synapse/handlers/room_member.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12154.misc diff --git a/changelog.d/12154.misc b/changelog.d/12154.misc new file mode 100644 index 0000000000..18d2a4728b --- /dev/null +++ b/changelog.d/12154.misc @@ -0,0 +1 @@ +Avoid generating state groups for local out-of-band leaves. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index a582837cf0..7cbc484b06 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1736,8 +1736,8 @@ class RoomMemberMasterHandler(RoomMemberHandler): txn_id=txn_id, prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids, + outlier=True, ) - event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True result_event = await self.event_creation_handler.handle_new_client_event( -- cgit 1.5.1 From d56202b0383627fdb4e04404d62922dce16868f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 4 Mar 2022 10:25:18 +0000 Subject: Fix type of `events` in `StateGroupStorage` and `StateHandler` (#12156) We make multiple passes over this, so a regular iterable won't do. --- changelog.d/12156.misc | 1 + synapse/state/__init__.py | 6 +++--- synapse/storage/state.py | 8 ++++---- 3 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12156.misc diff --git a/changelog.d/12156.misc b/changelog.d/12156.misc new file mode 100644 index 0000000000..4818d988d7 --- /dev/null +++ b/changelog.d/12156.misc @@ -0,0 +1 @@ +Fix some type annotations. 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6babd5963c..21888cc8c5 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -194,7 +194,7 @@ class StateHandler: } async def get_current_state_ids( - self, room_id: str, latest_event_ids: Optional[Iterable[str]] = None + self, room_id: str, latest_event_ids: Optional[Collection[str]] = None ) -> StateMap[str]: """Get the current state, or the state at a set of events, for a room @@ -243,7 +243,7 @@ class StateHandler: return await self.get_hosts_in_room_at_events(room_id, event_ids) async def get_hosts_in_room_at_events( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> Set[str]: """Get the hosts that were in a room at the given event ids @@ -404,7 +404,7 @@ class StateHandler: @measure_func() async def resolve_state_groups_for_events( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> _StateCacheEntry: """Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index e79ecf64a0..86f1a5373b 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -561,7 +561,7 @@ class StateGroupStorage: return state_group_delta.prev_group, state_group_delta.delta_ids async def get_state_groups_ids( - self, _room_id: str, event_ids: Iterable[str] + self, _room_id: str, event_ids: Collection[str] ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events @@ -596,7 +596,7 @@ class StateGroupStorage: return group_to_state[state_group] async def get_state_groups( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> Dict[int, List[EventBase]]: """Get the state groups for the given list of event_ids @@ -648,7 +648,7 @@ class StateGroupStorage: return self.stores.state._get_state_groups_from_groups(groups, state_filter) async def get_state_for_events( - self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None + self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. @@ -684,7 +684,7 @@ class StateGroupStorage: return {event: event_to_state[event] for event in event_ids} async def get_state_ids_for_events( - self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None + self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[str]]: """ Get the state dicts corresponding to a list of events, containing the event_ids -- cgit 1.5.1 From 87c230c27cdeb7e421f61f1271a500c760f1f63b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 4 Mar 2022 10:31:19 +0000 Subject: Update client-visibility filtering for outlier events (#12155) Avoid trying to get the state for outliers, which isn't a sensible thing to do. 
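The motivation for the `Iterable` to `Collection` change above is that a bare `Iterable` admits generators, which are exhausted after a single pass; code that iterates the argument twice then silently sees nothing on the second pass. A self-contained demonstration of the failure mode:

```python
from typing import Collection, Iterable


def two_passes(event_ids: Iterable[str]) -> None:
    first = [e for e in event_ids]
    second = [e for e in event_ids]  # empty if event_ids was a generator!
    print(len(first), len(second))


two_passes(e for e in ["$a", "$b"])  # prints "2 0" -- the silent bug
two_passes(["$a", "$b"])             # prints "2 2"


def safe(event_ids: Collection[str]) -> int:
    # Collection documents (and lets mypy enforce) that the argument must
    # support repeated iteration and len().
    return len(event_ids)
```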
--- changelog.d/12155.misc | 1 + synapse/visibility.py | 17 ++++++++++- tests/test_visibility.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 90 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12155.misc diff --git a/changelog.d/12155.misc b/changelog.d/12155.misc new file mode 100644 index 0000000000..9f333e718a --- /dev/null +++ b/changelog.d/12155.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/synapse/visibility.py b/synapse/visibility.py index 1b970ce479..281cbe4d88 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -81,8 +81,9 @@ async def filter_events_for_client( types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id)) + # we exclude outliers at this point, and then handle them separately later event_id_to_state = await storage.state.get_state_for_events( - frozenset(e.event_id for e in events), + frozenset(e.event_id for e in events if not e.internal_metadata.outlier), state_filter=StateFilter.from_types(types), ) @@ -154,6 +155,17 @@ async def filter_events_for_client( if event.event_id in always_include_ids: return event + # we need to handle outliers separately, since we don't have the room state. + if event.internal_metadata.outlier: + # Normally these can't be seen by clients, but we make an exception for + # for out-of-band membership events (eg, incoming invites, or rejections of + # said invite) for the user themselves. + if event.type == EventTypes.Member and event.state_key == user_id: + logger.debug("Returning out-of-band-membership event %s", event) + return event + + return None + state = event_id_to_state[event.event_id] # get the room_visibility at the time of the event. @@ -198,6 +210,9 @@ async def filter_events_for_client( # Always allow the user to see their own leave events, otherwise # they won't see the room disappear if they reject the invite + # + # (Note this doesn't work for out-of-band invite rejections, which don't + # have prev_state populated. They are handled above in the outlier code.) if membership == "leave" and ( prev_membership == "join" or prev_membership == "invite" ): diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 219b5660b1..532e3fe9cd 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -13,11 +13,12 @@ # limitations under the License. import logging from typing import Optional +from unittest.mock import patch from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase -from synapse.types import JsonDict -from synapse.visibility import filter_events_for_server +from synapse.events import EventBase, make_event_from_dict +from synapse.types import JsonDict, create_requester +from synapse.visibility import filter_events_for_client, filter_events_for_server from tests import unittest from tests.utils import create_room @@ -185,3 +186,72 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase): self.get_success(self.storage.persistence.persist_event(event, context)) return event + + +class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase): + def test_out_of_band_invite_rejection(self): + # this is where we have received an invite event over federation, and then + # rejected it. 
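The new outlier branch in `filter_events_for_client` boils down to: outliers have no stored room state, so rather than trying to look state up, show the event only if it is an out-of-band membership event about the requesting user. A condensed model of just that branch, with plain dicts standing in for `EventBase`:

```python
from typing import Optional


def filter_outlier(event: dict, user_id: str) -> Optional[dict]:
    if event["outlier"]:
        # Out-of-band membership events (incoming invites, or rejections of
        # them) are still shown to the user they concern...
        if event["type"] == "m.room.member" and event["state_key"] == user_id:
            return event
        # ...but every other outlier is hidden from clients.
        return None
    # Non-outliers fall through to the normal state-based visibility checks.
    return event


invite = {"outlier": True, "type": "m.room.member", "state_key": "@user:test"}
assert filter_outlier(invite, "@user:test") is invite
assert filter_outlier(invite, "@other:test") is None
```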
+ invite_pdu = { + "room_id": "!room:id", + "depth": 1, + "auth_events": [], + "prev_events": [], + "origin_server_ts": 1, + "sender": "@someone:" + self.OTHER_SERVER_NAME, + "type": "m.room.member", + "state_key": "@user:test", + "content": {"membership": "invite"}, + } + self.add_hashes_and_signatures(invite_pdu) + invite_event_id = make_event_from_dict(invite_pdu, RoomVersions.V9).event_id + + self.get_success( + self.hs.get_federation_server().on_invite_request( + self.OTHER_SERVER_NAME, + invite_pdu, + "9", + ) + ) + + # stub out do_remotely_reject_invite so that we fall back to a locally- + # generated rejection + with patch.object( + self.hs.get_federation_handler(), + "do_remotely_reject_invite", + side_effect=Exception(), + ): + reject_event_id, _ = self.get_success( + self.hs.get_room_member_handler().remote_reject_invite( + invite_event_id, + txn_id=None, + requester=create_requester("@user:test"), + content={}, + ) + ) + + invite_event, reject_event = self.get_success( + self.hs.get_datastores().main.get_events_as_list( + [invite_event_id, reject_event_id] + ) + ) + + # the invited user should be able to see both the invite and the rejection + self.assertEqual( + self.get_success( + filter_events_for_client( + self.hs.get_storage(), "@user:test", [invite_event, reject_event] + ) + ), + [invite_event, reject_event], + ) + + # other users should see neither + self.assertEqual( + self.get_success( + filter_events_for_client( + self.hs.get_storage(), "@other:test", [invite_event, reject_event] + ) + ), + [], + ) -- cgit 1.5.1 From 423cca9efe06d78aaca5f62fb74ee7e5bceebe49 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2022 11:48:15 +0000 Subject: Spread out sending device lists to remote hosts (#12132) --- changelog.d/12132.feature | 1 + synapse/federation/send_queue.py | 2 +- synapse/federation/sender/__init__.py | 26 +++++++---- synapse/federation/sender/per_destination_queue.py | 10 +++++ synapse/handlers/device.py | 2 +- synapse/replication/tcp/client.py | 2 +- tests/federation/test_federation_sender.py | 52 ++++++++++++++++++++-- 7 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12132.feature diff --git a/changelog.d/12132.feature b/changelog.d/12132.feature new file mode 100644 index 0000000000..3b8362ad35 --- /dev/null +++ b/changelog.d/12132.feature @@ -0,0 +1 @@ +Improve performance of logging in for large accounts. diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0d7c4f5067..d720b5fd3f 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -244,7 +244,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): self.notifier.on_new_replication_data() - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. 
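The test above forces the locally-generated rejection path by patching `do_remotely_reject_invite` to raise. The same technique in miniature:

```python
from unittest.mock import patch


class Handler:
    def do_remotely_reject_invite(self) -> str:
        return "remote rejection"

    def reject(self) -> str:
        try:
            return self.do_remotely_reject_invite()
        except Exception:
            # Fall back to a locally-generated rejection.
            return "local rejection"


handler = Handler()
with patch.object(handler, "do_remotely_reject_invite", side_effect=Exception()):
    assert handler.reject() == "local rejection"
assert handler.reject() == "remote rejection"  # the patch is undone here
```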
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 6106a486d1..30e2421efc 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -118,7 +118,12 @@ class AbstractFederationSender(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = True) -> None: + """Tells the sender that a new device message is ready to be sent to the + destination. The `immediate` flag specifies whether the messages should + be tried to be sent immediately, or whether it can be delayed for a + short while (to aid performance). + """ raise NotImplementedError() @abc.abstractmethod @@ -146,9 +151,8 @@ class AbstractFederationSender(metaclass=abc.ABCMeta): @attr.s -class _PresenceQueue: - """A queue of destinations that need to be woken up due to new presence - updates. +class _DestinationWakeupQueue: + """A queue of destinations that need to be woken up due to new updates. Staggers waking up of per destination queues to ensure that we don't attempt to start TLS connections with many hosts all at once, leading to pinned CPU. @@ -175,7 +179,7 @@ class _PresenceQueue: if not self.processing: self._handle() - @wrap_as_background_process("_PresenceQueue.handle") + @wrap_as_background_process("_DestinationWakeupQueue.handle") async def _handle(self) -> None: """Background process to drain the queue.""" @@ -297,7 +301,7 @@ class FederationSender(AbstractFederationSender): self._external_cache = hs.get_external_cache() - self._presence_queue = _PresenceQueue(self, self.clock) + self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock) def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination @@ -614,7 +618,7 @@ class FederationSender(AbstractFederationSender): states, start_loop=False ) - self._presence_queue.add_to_queue(destination) + self._destination_wakeup_queue.add_to_queue(destination) def build_and_send_edu( self, @@ -667,7 +671,7 @@ class FederationSender(AbstractFederationSender): else: queue.send_edu(edu) - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: if destination == self.server_name: logger.warning("Not sending device update to ourselves") return @@ -677,7 +681,11 @@ class FederationSender(AbstractFederationSender): ): return - self._get_per_destination_queue(destination).attempt_new_transaction() + if immediate: + self._get_per_destination_queue(destination).attempt_new_transaction() + else: + self._get_per_destination_queue(destination).mark_new_data() + self._destination_wakeup_queue.add_to_queue(destination) def wake_destination(self, destination: str) -> None: """Called when we want to retry sending transactions to a remote. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index c8768f22bc..d80f0ac5e8 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -219,6 +219,16 @@ class PerDestinationQueue: self._pending_edus.append(edu) self.attempt_new_transaction() + def mark_new_data(self) -> None: + """Marks that the destination has new data to send, without starting a + new transaction. 
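The generalisation of `_PresenceQueue` into `_DestinationWakeupQueue` above exists to avoid waking every per-destination send loop at once (and pinning the CPU on many simultaneous TLS handshakes). A toy asyncio version of the staggered-drain idea, omitting details such as de-duplication of destinations:

```python
import asyncio
from typing import Callable


class WakeupQueue:
    def __init__(self, wake: Callable[[str], None], delay: float = 0.05) -> None:
        self._wake = wake  # callback that starts a destination's send loop
        self._delay = delay
        self._queue: "asyncio.Queue[str]" = asyncio.Queue()
        self._draining = False

    def add(self, destination: str) -> None:
        self._queue.put_nowait(destination)
        if not self._draining:
            self._draining = True
            asyncio.ensure_future(self._drain())

    async def _drain(self) -> None:
        try:
            while not self._queue.empty():
                self._wake(self._queue.get_nowait())
                await asyncio.sleep(self._delay)  # stagger the wake-ups
        finally:
            self._draining = False
```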
+ + If a transaction loop is already in progress then a new transcation will + be attempted when the current one finishes. + """ + + self._new_data_to_send = True + def attempt_new_transaction(self) -> None: """Try to start a new transaction to this destination diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 934b5bd734..d90cb259a6 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -506,7 +506,7 @@ class DeviceHandler(DeviceWorkerHandler): "Sending device list update notif for %r to: %r", user_id, hosts ) for host in hosts: - self.federation_sender.send_device_messages(host) + self.federation_sender.send_device_messages(host, immediate=False) log_kv({"message": "sent device update to host", "host": host}) async def notify_user_signature_update( diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 1b8479b0b4..b8fc1d4db9 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -380,7 +380,7 @@ class FederationSenderHandler: # changes. hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: - self.federation_sender.send_device_messages(host) + self.federation_sender.send_device_messages(host, immediate=False) elif stream_name == ToDeviceStream.NAME: # The to_device stream includes stuff to be pushed to both local diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 60e0c31f43..e90592855a 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -201,9 +201,12 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.assertEqual(len(self.edus), 1) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # a second call should produce no new device EDUs self.hs.get_federation_sender().send_device_messages("host2") - self.pump() self.assertEqual(self.edus, []) # a second device @@ -232,6 +235,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): device1_signing_key = self.generate_and_upload_device_signing_key(u1, "D1") device2_signing_key = self.generate_and_upload_device_signing_key(u1, "D2") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect two more edus self.assertEqual(len(self.edus), 2) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) @@ -265,6 +272,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): e2e_handler.upload_signing_keys_for_user(u1, cross_signing_keys) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect signing key update edu self.assertEqual(len(self.edus), 2) self.assertEqual(self.edus.pop(0)["edu_type"], "m.signing_key_update") @@ -284,6 +295,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): ) self.assertEqual(ret["failures"], {}) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect two edus, in one or two transactions. We don't know what order the # devices will be updated. 
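All of the `self.reactor.advance(1)` calls added to these tests rely on the test harness running against a fake reactor whose clock only moves when told to, so the newly-delayed device-list sends can be flushed deterministically. The underlying Twisted primitive:

```python
from twisted.internet.task import Clock

clock = Clock()
fired = []
clock.callLater(1.0, fired.append, "queued work ran")

assert fired == []  # nothing happens until time is advanced...
clock.advance(1.0)  # ...then pending calls fire deterministically
assert fired == ["queued work ran"]
```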
self.assertEqual(len(self.edus), 2) @@ -307,6 +322,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect three edus self.assertEqual(len(self.edus), 3) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) @@ -318,6 +337,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect three edus, in an unknown order self.assertEqual(len(self.edus), 3) for edu in self.edus: @@ -350,12 +373,19 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + self.assertGreaterEqual(mock_send_txn.call_count, 4) # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # for each device, there should be a single update self.assertEqual(len(self.edus), 3) @@ -390,6 +420,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + self.assertGreaterEqual(mock_send_txn.call_count, 4) # run the prune job @@ -401,7 +435,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # there should be a single update for this user. self.assertEqual(len(self.edus), 1) @@ -435,6 +472,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # delete them again self.get_success( self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) @@ -451,7 +492,10 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # ... and we should get a single update for this user. 
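These tests simulate a remote that is down and then recovers by flipping `mock_send_txn.side_effect` between an exception and the real recording function. The same pattern in isolation:

```python
from unittest.mock import Mock

sent = []
mock_send_txn = Mock(side_effect=Exception("host2 is down"))


def try_send(payload: str) -> None:
    try:
        mock_send_txn(payload)
        sent.append(payload)
    except Exception:
        pass  # the queue holds on to the update for a later retry


try_send("device update")         # fails silently; nothing is recorded
mock_send_txn.side_effect = None  # "recover the server"
try_send("device update")
assert sent == ["device update"]
```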
self.assertEqual(len(self.edus), 1) -- cgit 1.5.1 From 4aeb00ca20a0d9dbb2a104591aca081c723eb6d9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 11:58:49 +0000 Subject: Move synctl into `synapse._scripts` and expose as an entrypoint (#12140) --- .dockerignore | 1 - MANIFEST.in | 1 - changelog.d/12140.misc | 1 + docker/Dockerfile | 2 +- docs/postgres.md | 8 +- docs/turn-howto.md | 5 +- docs/upgrade.md | 23 ++- scripts-dev/lint.sh | 2 +- setup.py | 2 +- synapse/_scripts/synctl.py | 360 +++++++++++++++++++++++++++++++++++++++++++++ synctl | 360 --------------------------------------------- tox.ini | 1 - 12 files changed, 393 insertions(+), 373 deletions(-) create mode 100644 changelog.d/12140.misc create mode 100755 synapse/_scripts/synctl.py delete mode 100755 synctl diff --git a/.dockerignore b/.dockerignore index 617f701597..434231fce9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,6 +7,5 @@ !MANIFEST.in !README.rst !setup.py -!synctl **/__pycache__ diff --git a/MANIFEST.in b/MANIFEST.in index f1e295e583..d744c090ac 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ -include synctl include LICENSE include VERSION include *.rst diff --git a/changelog.d/12140.misc b/changelog.d/12140.misc new file mode 100644 index 0000000000..33a21a29f0 --- /dev/null +++ b/changelog.d/12140.misc @@ -0,0 +1 @@ +Move `synctl` into `synapse._scripts` and expose as an entry point. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 327275a9ca..24b5515eb9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,7 +46,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY MANIFEST.in README.rst setup.py synctl /synapse/ +COPY MANIFEST.in README.rst setup.py /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/postgres.md b/docs/postgres.md index 0562021da5..de4e2ba4b7 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the copy is complete, restart synapse. For instance: ```sh -./synctl stop +synctl stop cp homeserver.db homeserver.db.snapshot -./synctl start +synctl start ``` Copy the old config file into a new config file: @@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the PostgreSQL database configuration file `homeserver-postgres.yaml`: ```sh -./synctl stop +synctl stop mv homeserver.yaml homeserver-old-sqlite.yaml mv homeserver-postgres.yaml homeserver.yaml -./synctl start +synctl start ``` Synapse should now be running against PostgreSQL. diff --git a/docs/turn-howto.md b/docs/turn-howto.md index eba7ca6124..3a2cd04e36 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse: * If you use synctl: ```sh - cd /where/you/run/synapse - ./synctl restart + # Depending on how Synapse is installed, synctl may already be on + # your PATH. If not, you may need to activate a virtual environment. + synctl restart ``` * If you use systemd: ```sh diff --git a/docs/upgrade.md b/docs/upgrade.md index f9be3ac6bc..0d0bb066ee 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -47,7 +47,7 @@ this document. 3. 
Restart Synapse: ```bash - ./synctl restart + synctl restart ``` To check whether your update was successful, you can check the running @@ -85,6 +85,27 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.55.0 + +## `synctl` script has been moved + +The `synctl` script +[has been made](https://github.com/matrix-org/synapse/pull/12140) an +[entry point](https://packaging.python.org/en/latest/specifications/entry-points/) +and no longer exists at the root of Synapse's source tree. If you wish to use +`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g. +`synctl start` instead of `./synctl start` or `/path/to/synctl start`. + +You will need to ensure `synctl` is on your `PATH`. + - This is automatically the case when using + [Debian packages](https://packages.matrix.org/debian/) or + [docker images](https://hub.docker.com/r/matrixdotorg/synapse) + provided by Matrix.org. + - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added + to your Python installation's `bin`. This should be on your `PATH` + automatically, though you might need to activate a virtual environment + depending on how you installed Synapse. + # Upgrading to v1.54.0 ## Legacy structured logging configuration removal diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 2f5f2c3566..c063fafa97 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -85,7 +85,7 @@ else "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them "scripts-dev" - "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" + "contrib" "setup.py" "synmark" "stubs" ".ci" ) fi fi diff --git a/setup.py b/setup.py index 318df16766..439ed75d72 100755 --- a/setup.py +++ b/setup.py @@ -155,6 +155,7 @@ setup( # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + "synctl = synapse._scripts.synctl:main", # Scripts "export_signing_key = synapse._scripts.export_signing_key:main", "generate_config = synapse._scripts.generate_config:main", @@ -177,6 +178,5 @@ setup( "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"], cmdclass={"test": TestCommand}, ) diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py new file mode 100755 index 0000000000..1ab36949c7 --- /dev/null +++ b/synapse/_scripts/synctl.py @@ -0,0 +1,360 @@ +#!/usr/bin/env python +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
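The `synctl = synapse._scripts.synctl:main` entry point added to `setup.py` above replaces the old top-level script: at install time, packaging tools generate a small wrapper on `PATH` that imports the named module and calls the named function. Roughly (the real generated wrapper also handles distribution metadata):

```python
import sys

# Approximately what pip generates for the "synctl" console script:
from synapse._scripts.synctl import main

if __name__ == "__main__":
    sys.exit(main())
```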
+ +import argparse +import collections +import errno +import glob +import os +import os.path +import signal +import subprocess +import sys +import time +from typing import Iterable, Optional + +import yaml + +from synapse.config import find_config_files + +MAIN_PROCESS = "synapse.app.homeserver" + +GREEN = "\x1b[1;32m" +YELLOW = "\x1b[1;33m" +RED = "\x1b[1;31m" +NORMAL = "\x1b[m" + +SYNCTL_CACHE_FACTOR_WARNING = """\ +Setting 'synctl_cache_factor' in the config is deprecated. Instead, please do +one of the following: + - Either set the environment variable 'SYNAPSE_CACHE_FACTOR' + - or set 'caches.global_factor' in the homeserver config. +--------------------------------------------------------------------------------""" + + +def pid_running(pid): + try: + os.kill(pid, 0) + except OSError as err: + if err.errno == errno.EPERM: + pass # process exists + else: + return False + + # When running in a container, orphan processes may not get reaped and their + # PIDs may remain valid. Try to work around the issue. + try: + with open(f"/proc/{pid}/status") as status_file: + if "zombie" in status_file.read(): + return False + except Exception: + # This isn't Linux or `/proc/` is unavailable. + # Assume that the process is still running. + pass + + return True + + +def write(message, colour=NORMAL, stream=sys.stdout): + # Lets check if we're writing to a TTY before colouring + should_colour = False + try: + should_colour = stream.isatty() + except AttributeError: + # Just in case `isatty` isn't defined on everything. The python + # docs are incredibly vague. + pass + + if not should_colour: + stream.write(message + "\n") + else: + stream.write(colour + message + NORMAL + "\n") + + +def abort(message, colour=RED, stream=sys.stderr): + write(message, colour, stream) + sys.exit(1) + + +def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) -> bool: + """Attempts to start a synapse main or worker process. + Args: + pidfile: the pidfile we expect the process to create + app: the python module to run + config_files: config files to pass to synapse + daemonize: if True, will include a --daemonize argument to synapse + + Returns: + True if the process started successfully or was already running + False if there was an error starting the process + """ + + if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): + print(app + " already running") + return True + + args = [sys.executable, "-m", app] + for c in config_files: + args += ["-c", c] + if daemonize: + args.append("--daemonize") + + try: + subprocess.check_call(args) + write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) + return True + except subprocess.CalledProcessError as e: + err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % ( + app, + ",".join(config_files), + e.returncode, + ) + if daemonize: + err += ", or run synctl with --no-daemonize" + err += "." + write(err, colour=RED, stream=sys.stderr) + return False + + +def stop(pidfile: str, app: str) -> Optional[int]: + """Attempts to kill a synapse worker from the pidfile. 
+ Args: + pidfile: path to file containing worker's pid + app: name of the worker's appservice + + Returns: + process id, or None if the process was not running + """ + + if os.path.exists(pidfile): + pid = int(open(pidfile).read()) + try: + os.kill(pid, signal.SIGTERM) + write("stopped %s" % (app,), colour=GREEN) + return pid + except OSError as err: + if err.errno == errno.ESRCH: + write("%s not running" % (app,), colour=YELLOW) + elif err.errno == errno.EPERM: + abort("Cannot stop %s: Operation not permitted" % (app,)) + else: + abort("Cannot stop %s: Unknown error" % (app,)) + else: + write( + "No running worker of %s found (from %s)\nThe process might be managed by another controller (e.g. systemd)" + % (app, pidfile), + colour=YELLOW, + ) + return None + + +Worker = collections.namedtuple( + "Worker", ["app", "configfile", "pidfile", "cache_factor", "cache_factors"] +) + + +def main(): + + parser = argparse.ArgumentParser() + + parser.add_argument( + "action", + choices=["start", "stop", "restart"], + help="whether to start, stop or restart the synapse", + ) + parser.add_argument( + "configfile", + nargs="?", + default="homeserver.yaml", + help="the homeserver config file. Defaults to homeserver.yaml. May also be" + " a directory with *.yaml files", + ) + parser.add_argument( + "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker" + ) + parser.add_argument( + "-a", + "--all-processes", + metavar="WORKERCONFIGDIR", + help="start or stop all the workers in the given directory" + " and the main synapse process", + ) + parser.add_argument( + "--no-daemonize", + action="store_false", + dest="daemonize", + help="Run synapse in the foreground for debugging. " + "Will work only if the daemonize option is not set in the config.", + ) + + options = parser.parse_args() + + if options.worker and options.all_processes: + write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) + sys.exit(1) + if not options.daemonize and options.all_processes: + write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr) + sys.exit(1) + + configfile = options.configfile + + if not os.path.exists(configfile): + write( + f"Config file {configfile} does not exist.\n" + f"To generate a config file, run:\n" + f" {sys.executable} -m {MAIN_PROCESS}" + f" -c {configfile} --generate-config" + f" --server-name= --report-stats=\n", + stream=sys.stderr, + ) + sys.exit(1) + + config_files = find_config_files([configfile]) + config = {} + for config_file in config_files: + with open(config_file) as file_stream: + yaml_config = yaml.safe_load(file_stream) + if yaml_config is not None: + config.update(yaml_config) + + pidfile = config["pid_file"] + cache_factor = config.get("synctl_cache_factor") + start_stop_synapse = True + + if cache_factor: + write(SYNCTL_CACHE_FACTOR_WARNING) + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + + cache_factors = config.get("synctl_cache_factors", {}) + for cache_name, factor in cache_factors.items(): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + worker_configfiles = [] + if options.worker: + start_stop_synapse = False + worker_configfile = options.worker + if not os.path.exists(worker_configfile): + write( + "No worker config found at %r" % (worker_configfile,), stream=sys.stderr + ) + sys.exit(1) + worker_configfiles.append(worker_configfile) + + if options.all_processes: + # To start the main synapse with -a you need to add a worker file + # with worker_app == "synapse.app.homeserver" + 
start_stop_synapse = False + worker_configdir = options.all_processes + if not os.path.isdir(worker_configdir): + write( + "No worker config directory found at %r" % (worker_configdir,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.extend( + sorted(glob.glob(os.path.join(worker_configdir, "*.yaml"))) + ) + + workers = [] + for worker_configfile in worker_configfiles: + with open(worker_configfile) as stream: + worker_config = yaml.safe_load(stream) + worker_app = worker_config["worker_app"] + if worker_app == "synapse.app.homeserver": + # We need to special case all of this to pick up options that may + # be set in the main config file or in this worker config file. + worker_pidfile = worker_config.get("pid_file") or pidfile + worker_cache_factor = ( + worker_config.get("synctl_cache_factor") or cache_factor + ) + worker_cache_factors = ( + worker_config.get("synctl_cache_factors") or cache_factors + ) + # The master process doesn't support using worker_* config. + for key in worker_config: + if key == "worker_app": # But we allow worker_app + continue + assert not key.startswith( + "worker_" + ), "Main process cannot use worker_* config" + else: + worker_pidfile = worker_config["worker_pid_file"] + worker_cache_factor = worker_config.get("synctl_cache_factor") + worker_cache_factors = worker_config.get("synctl_cache_factors", {}) + workers.append( + Worker( + worker_app, + worker_configfile, + worker_pidfile, + worker_cache_factor, + worker_cache_factors, + ) + ) + + action = options.action + + if action == "stop" or action == "restart": + running_pids = [] + for worker in workers: + pid = stop(worker.pidfile, worker.app) + if pid is not None: + running_pids.append(pid) + + if start_stop_synapse: + pid = stop(pidfile, MAIN_PROCESS) + if pid is not None: + running_pids.append(pid) + + if len(running_pids) > 0: + write("Waiting for processes to exit...") + for running_pid in running_pids: + while pid_running(running_pid): + time.sleep(0.2) + write("All processes exited") + + if action == "start" or action == "restart": + error = False + if start_stop_synapse: + if not start(pidfile, MAIN_PROCESS, (configfile,), options.daemonize): + error = True + + for worker in workers: + env = os.environ.copy() + + if worker.cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) + + for cache_name, factor in worker.cache_factors.items(): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + if not start( + worker.pidfile, + worker.app, + (configfile, worker.configfile), + options.daemonize, + ): + error = True + + # Reset env back to the original + os.environ.clear() + os.environ.update(env) + + if error: + exit(1) + + +if __name__ == "__main__": + main() diff --git a/synctl b/synctl deleted file mode 100755 index 1ab36949c7..0000000000 --- a/synctl +++ /dev/null @@ -1,360 +0,0 @@ -#!/usr/bin/env python -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import collections -import errno -import glob -import os -import os.path -import signal -import subprocess -import sys -import time -from typing import Iterable, Optional - -import yaml - -from synapse.config import find_config_files - -MAIN_PROCESS = "synapse.app.homeserver" - -GREEN = "\x1b[1;32m" -YELLOW = "\x1b[1;33m" -RED = "\x1b[1;31m" -NORMAL = "\x1b[m" - -SYNCTL_CACHE_FACTOR_WARNING = """\ -Setting 'synctl_cache_factor' in the config is deprecated. Instead, please do -one of the following: - - Either set the environment variable 'SYNAPSE_CACHE_FACTOR' - - or set 'caches.global_factor' in the homeserver config. ---------------------------------------------------------------------------------""" - - -def pid_running(pid): - try: - os.kill(pid, 0) - except OSError as err: - if err.errno == errno.EPERM: - pass # process exists - else: - return False - - # When running in a container, orphan processes may not get reaped and their - # PIDs may remain valid. Try to work around the issue. - try: - with open(f"/proc/{pid}/status") as status_file: - if "zombie" in status_file.read(): - return False - except Exception: - # This isn't Linux or `/proc/` is unavailable. - # Assume that the process is still running. - pass - - return True - - -def write(message, colour=NORMAL, stream=sys.stdout): - # Lets check if we're writing to a TTY before colouring - should_colour = False - try: - should_colour = stream.isatty() - except AttributeError: - # Just in case `isatty` isn't defined on everything. The python - # docs are incredibly vague. - pass - - if not should_colour: - stream.write(message + "\n") - else: - stream.write(colour + message + NORMAL + "\n") - - -def abort(message, colour=RED, stream=sys.stderr): - write(message, colour, stream) - sys.exit(1) - - -def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) -> bool: - """Attempts to start a synapse main or worker process. - Args: - pidfile: the pidfile we expect the process to create - app: the python module to run - config_files: config files to pass to synapse - daemonize: if True, will include a --daemonize argument to synapse - - Returns: - True if the process started successfully or was already running - False if there was an error starting the process - """ - - if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): - print(app + " already running") - return True - - args = [sys.executable, "-m", app] - for c in config_files: - args += ["-c", c] - if daemonize: - args.append("--daemonize") - - try: - subprocess.check_call(args) - write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) - return True - except subprocess.CalledProcessError as e: - err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % ( - app, - ",".join(config_files), - e.returncode, - ) - if daemonize: - err += ", or run synctl with --no-daemonize" - err += "." - write(err, colour=RED, stream=sys.stderr) - return False - - -def stop(pidfile: str, app: str) -> Optional[int]: - """Attempts to kill a synapse worker from the pidfile. 
- Args: - pidfile: path to file containing worker's pid - app: name of the worker's appservice - - Returns: - process id, or None if the process was not running - """ - - if os.path.exists(pidfile): - pid = int(open(pidfile).read()) - try: - os.kill(pid, signal.SIGTERM) - write("stopped %s" % (app,), colour=GREEN) - return pid - except OSError as err: - if err.errno == errno.ESRCH: - write("%s not running" % (app,), colour=YELLOW) - elif err.errno == errno.EPERM: - abort("Cannot stop %s: Operation not permitted" % (app,)) - else: - abort("Cannot stop %s: Unknown error" % (app,)) - else: - write( - "No running worker of %s found (from %s)\nThe process might be managed by another controller (e.g. systemd)" - % (app, pidfile), - colour=YELLOW, - ) - return None - - -Worker = collections.namedtuple( - "Worker", ["app", "configfile", "pidfile", "cache_factor", "cache_factors"] -) - - -def main(): - - parser = argparse.ArgumentParser() - - parser.add_argument( - "action", - choices=["start", "stop", "restart"], - help="whether to start, stop or restart the synapse", - ) - parser.add_argument( - "configfile", - nargs="?", - default="homeserver.yaml", - help="the homeserver config file. Defaults to homeserver.yaml. May also be" - " a directory with *.yaml files", - ) - parser.add_argument( - "-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker" - ) - parser.add_argument( - "-a", - "--all-processes", - metavar="WORKERCONFIGDIR", - help="start or stop all the workers in the given directory" - " and the main synapse process", - ) - parser.add_argument( - "--no-daemonize", - action="store_false", - dest="daemonize", - help="Run synapse in the foreground for debugging. " - "Will work only if the daemonize option is not set in the config.", - ) - - options = parser.parse_args() - - if options.worker and options.all_processes: - write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) - sys.exit(1) - if not options.daemonize and options.all_processes: - write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr) - sys.exit(1) - - configfile = options.configfile - - if not os.path.exists(configfile): - write( - f"Config file {configfile} does not exist.\n" - f"To generate a config file, run:\n" - f" {sys.executable} -m {MAIN_PROCESS}" - f" -c {configfile} --generate-config" - f" --server-name= --report-stats=\n", - stream=sys.stderr, - ) - sys.exit(1) - - config_files = find_config_files([configfile]) - config = {} - for config_file in config_files: - with open(config_file) as file_stream: - yaml_config = yaml.safe_load(file_stream) - if yaml_config is not None: - config.update(yaml_config) - - pidfile = config["pid_file"] - cache_factor = config.get("synctl_cache_factor") - start_stop_synapse = True - - if cache_factor: - write(SYNCTL_CACHE_FACTOR_WARNING) - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - - cache_factors = config.get("synctl_cache_factors", {}) - for cache_name, factor in cache_factors.items(): - os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - - worker_configfiles = [] - if options.worker: - start_stop_synapse = False - worker_configfile = options.worker - if not os.path.exists(worker_configfile): - write( - "No worker config found at %r" % (worker_configfile,), stream=sys.stderr - ) - sys.exit(1) - worker_configfiles.append(worker_configfile) - - if options.all_processes: - # To start the main synapse with -a you need to add a worker file - # with worker_app == "synapse.app.homeserver" - 
start_stop_synapse = False - worker_configdir = options.all_processes - if not os.path.isdir(worker_configdir): - write( - "No worker config directory found at %r" % (worker_configdir,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.extend( - sorted(glob.glob(os.path.join(worker_configdir, "*.yaml"))) - ) - - workers = [] - for worker_configfile in worker_configfiles: - with open(worker_configfile) as stream: - worker_config = yaml.safe_load(stream) - worker_app = worker_config["worker_app"] - if worker_app == "synapse.app.homeserver": - # We need to special case all of this to pick up options that may - # be set in the main config file or in this worker config file. - worker_pidfile = worker_config.get("pid_file") or pidfile - worker_cache_factor = ( - worker_config.get("synctl_cache_factor") or cache_factor - ) - worker_cache_factors = ( - worker_config.get("synctl_cache_factors") or cache_factors - ) - # The master process doesn't support using worker_* config. - for key in worker_config: - if key == "worker_app": # But we allow worker_app - continue - assert not key.startswith( - "worker_" - ), "Main process cannot use worker_* config" - else: - worker_pidfile = worker_config["worker_pid_file"] - worker_cache_factor = worker_config.get("synctl_cache_factor") - worker_cache_factors = worker_config.get("synctl_cache_factors", {}) - workers.append( - Worker( - worker_app, - worker_configfile, - worker_pidfile, - worker_cache_factor, - worker_cache_factors, - ) - ) - - action = options.action - - if action == "stop" or action == "restart": - running_pids = [] - for worker in workers: - pid = stop(worker.pidfile, worker.app) - if pid is not None: - running_pids.append(pid) - - if start_stop_synapse: - pid = stop(pidfile, MAIN_PROCESS) - if pid is not None: - running_pids.append(pid) - - if len(running_pids) > 0: - write("Waiting for processes to exit...") - for running_pid in running_pids: - while pid_running(running_pid): - time.sleep(0.2) - write("All processes exited") - - if action == "start" or action == "restart": - error = False - if start_stop_synapse: - if not start(pidfile, MAIN_PROCESS, (configfile,), options.daemonize): - error = True - - for worker in workers: - env = os.environ.copy() - - if worker.cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - - for cache_name, factor in worker.cache_factors.items(): - os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - - if not start( - worker.pidfile, - worker.app, - (configfile, worker.configfile), - options.daemonize, - ): - error = True - - # Reset env back to the original - os.environ.clear() - os.environ.update(env) - - if error: - exit(1) - - -if __name__ == "__main__": - main() diff --git a/tox.ini b/tox.ini index 04d282a705..f1f96b27ea 100644 --- a/tox.ini +++ b/tox.ini @@ -42,7 +42,6 @@ lint_targets = scripts-dev stubs contrib - synctl synmark .ci docker -- cgit 1.5.1 From 36071d39f784ddc2271c91a72a55d9ee4f8689bb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 12:01:51 +0000 Subject: Changelog (#12153) --- .github/workflows/tests.yml | 1 + changelog.d/12153.misc | 1 + tox.ini | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12153.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c89c50cd07..3bce95b0e0 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,6 +17,7 @@ jobs: - uses: actions/setup-python@v2 - run: pip install -e . 
- run: scripts-dev/generate_sample_config.sh --check + - run: scripts-dev/config-lint.sh lint: runs-on: ubuntu-latest diff --git a/changelog.d/12153.misc b/changelog.d/12153.misc new file mode 100644 index 0000000000..f02d140f38 --- /dev/null +++ b/changelog.d/12153.misc @@ -0,0 +1 @@ +Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/tox.ini b/tox.ini index f1f96b27ea..3ffd2c3e97 100644 --- a/tox.ini +++ b/tox.ini @@ -151,7 +151,6 @@ extras = lint commands = python -m black --check --diff {[base]lint_targets} flake8 {[base]lint_targets} {env:PEP8SUFFIX:} - {toxinidir}/scripts-dev/config-lint.sh [testenv:check_isort] extras = lint -- cgit 1.5.1 From cd1ae3d0b438ff453b7d4750c4fe901f266fcbb6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 4 Mar 2022 07:10:10 -0500 Subject: Remove backwards compatibility with RelationPaginationToken. (#12138) --- changelog.d/12138.removal | 1 + synapse/rest/client/relations.py | 55 +++++++--------------------- synapse/storage/relations.py | 31 ---------------- tests/rest/client/test_relations.py | 73 +------------------------------------ 4 files changed, 16 insertions(+), 144 deletions(-) create mode 100644 changelog.d/12138.removal diff --git a/changelog.d/12138.removal b/changelog.d/12138.removal new file mode 100644 index 0000000000..6ed84d476c --- /dev/null +++ b/changelog.d/12138.removal @@ -0,0 +1 @@ +Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 487ea38b55..07fa1cdd4c 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -27,50 +27,15 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.storage.relations import ( - AggregationPaginationToken, - PaginationChunk, - RelationPaginationToken, -) -from synapse.types import JsonDict, RoomStreamToken, StreamToken +from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.types import JsonDict, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) -async def _parse_token( - store: "DataStore", token: Optional[str] -) -> Optional[StreamToken]: - """ - For backwards compatibility support RelationPaginationToken, but new pagination - tokens are generated as full StreamTokens, to be compatible with /sync and /messages. - """ - if not token: - return None - # Luckily the format for StreamToken and RelationPaginationToken differ enough - # that they can easily be separated. An "_" appears in the serialization of - # RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses - # "-" only for separators.
- if "_" in token: - return await StreamToken.from_string(store, token) - else: - relation_token = RelationPaginationToken.from_string(token) - return StreamToken( - room_key=RoomStreamToken(relation_token.topological, relation_token.stream), - presence_key=0, - typing_key=0, - receipt_key=0, - account_data_key=0, - push_rules_key=0, - to_device_key=0, - device_list_key=0, - groups_key=0, - ) - - class RelationPaginationServlet(RestServlet): """API to paginate relations on an event by topological ordering, optionally filtered by relation type and event type. @@ -122,8 +87,12 @@ class RelationPaginationServlet(RestServlet): pagination_chunk = PaginationChunk(chunk=[]) else: # Return the relations - from_token = await _parse_token(self.store, from_token_str) - to_token = await _parse_token(self.store, to_token_str) + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) pagination_chunk = await self.store.get_relations_for_event( event_id=parent_id, @@ -317,8 +286,12 @@ class RelationAggregationGroupPaginationServlet(RestServlet): from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - from_token = await _parse_token(self.store, from_token_str) - to_token = await _parse_token(self.store, to_token_str) + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) result = await self.store.get_relations_for_event( event_id=parent_id, diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 36ca2b8273..fba270150b 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -54,37 +54,6 @@ class PaginationChunk: return d -@attr.s(frozen=True, slots=True, auto_attribs=True) -class RelationPaginationToken: - """Pagination token for relation pagination API. - - As the results are in topological order, we can use the - `topological_ordering` and `stream_ordering` fields of the events at the - boundaries of the chunk as pagination tokens. - - Attributes: - topological: The topological ordering of the boundary event - stream: The stream ordering of the boundary event. - """ - - topological: int - stream: int - - @staticmethod - def from_string(string: str) -> "RelationPaginationToken": - try: - t, s = string.split("-") - return RelationPaginationToken(int(t), int(s)) - except ValueError: - raise SynapseError(400, "Invalid relation pagination token") - - async def to_string(self, store: "DataStore") -> str: - return "%d-%d" % (self.topological, self.stream) - - def as_tuple(self) -> Tuple[Any, ...]: - return attr.astuple(self) - - @attr.s(frozen=True, slots=True, auto_attribs=True) class AggregationPaginationToken: """Pagination token for relation aggregation pagination API. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 53062b41de..274f9c44c1 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -24,8 +24,7 @@ from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync from synapse.server import HomeServer -from synapse.storage.relations import RelationPaginationToken -from synapse.types import JsonDict, StreamToken +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -281,15 +280,6 @@ class RelationsTestCase(BaseRelationsTestCase): channel.json_body["chunk"][0], ) - def _stream_token_to_relation_token(self, token: str) -> str: - """Convert a StreamToken into a legacy token (RelationPaginationToken).""" - room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key - return self.get_success( - RelationPaginationToken( - topological=room_key.topological, stream=room_key.stream - ).to_string(self.store) - ) - def test_repeated_paginate_relations(self) -> None: """Test that if we paginate using a limit and tokens then we get the expected events. @@ -330,34 +320,6 @@ class RelationsTestCase(BaseRelationsTestCase): found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - # Reset and try again, but convert the tokens to the legacy format. - prev_token = "" - found_event_ids = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + self._stream_token_to_relation_token(prev_token) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - def test_pagination_from_sync_and_messages(self) -> None: """Pagination tokens from /sync and /messages can be used to paginate /relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") @@ -543,39 +505,6 @@ class RelationsTestCase(BaseRelationsTestCase): found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - # Reset and try again, but convert the tokens to the legacy format. 
- prev_token = "" - found_event_ids = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + self._stream_token_to_relation_token(prev_token) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}" - f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" - f"/m.reaction/{encoded_key}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - def test_aggregation(self) -> None: """Test that annotations get correctly aggregated.""" -- cgit 1.5.1 From 158e0937eb56f14dd851549939037de499526fd2 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 4 Mar 2022 13:10:05 +0000 Subject: Add test for `ObservableDeferred`'s cancellation behaviour (#12149) Signed-off-by: Sean Quah --- changelog.d/12149.misc | 1 + tests/util/test_async_helpers.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 changelog.d/12149.misc diff --git a/changelog.d/12149.misc b/changelog.d/12149.misc new file mode 100644 index 0000000000..d39af96723 --- /dev/null +++ b/changelog.d/12149.misc @@ -0,0 +1 @@ +Add test for `ObservableDeferred`'s cancellation behaviour. diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 362014f4cb..ff53ce114b 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -100,6 +100,34 @@ class ObservableDeferredTest(TestCase): self.assertEqual(str(results[0].value), "gah!", "observer 1 errback result") self.assertEqual(str(results[1].value), "gah!", "observer 2 errback result") + def test_cancellation(self): + """Test that cancelling an observer does not affect other observers.""" + origin_d: "Deferred[int]" = Deferred() + observable = ObservableDeferred(origin_d, consumeErrors=True) + + observer1 = observable.observe() + observer2 = observable.observe() + observer3 = observable.observe() + + self.assertFalse(observer1.called) + self.assertFalse(observer2.called) + self.assertFalse(observer3.called) + + # cancel the second observer + observer2.cancel() + self.assertFalse(observer1.called) + self.failureResultOf(observer2, CancelledError) + self.assertFalse(observer3.called) + + # other observers resolve as normal + origin_d.callback(123) + self.assertEqual(observer1.result, 123, "observer 1 callback result") + self.assertEqual(observer3.result, 123, "observer 3 callback result") + + # additional observers resolve as normal + observer4 = observable.observe() + self.assertEqual(observer4.result, 123, "observer 4 callback result") + class TimeoutDeferredTest(TestCase): def setUp(self): -- cgit 1.5.1 From 75574726a766f09d955c05672d400c65cb341810 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 4 Mar 2022 15:37:02 +0000 Subject: Add type hints for `ObservableDeferred` attributes (#12159) Signed-off-by: Sean Quah --- changelog.d/12159.misc | 1 + synapse/util/async_helpers.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 
changelog.d/12159.misc diff --git a/changelog.d/12159.misc b/changelog.d/12159.misc new file mode 100644 index 0000000000..30500f2fd9 --- /dev/null +++ b/changelog.d/12159.misc @@ -0,0 +1 @@ +Add type hints for `ObservableDeferred` attributes. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 60c03a66fd..a9f67dcbac 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -40,7 +40,7 @@ from typing import ( ) import attr -from typing_extensions import ContextManager +from typing_extensions import ContextManager, Literal from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -96,6 +96,10 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): __slots__ = ["_deferred", "_observers", "_result"] + _deferred: "defer.Deferred[_T]" + _observers: Union[List["defer.Deferred[_T]"], Tuple[()]] + _result: Union[None, Tuple[Literal[True], _T], Tuple[Literal[False], Failure]] + def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) @@ -158,12 +162,14 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): effect the underlying deferred. """ if not self._result: + assert isinstance(self._observers, list) d: "defer.Deferred[_T]" = defer.Deferred() self._observers.append(d) return d + elif self._result[0]: + return defer.succeed(self._result[1]) else: - success, res = self._result - return defer.succeed(res) if success else defer.fail(res) + return defer.fail(self._result[1]) def observers(self) -> "Collection[defer.Deferred[_T]]": return self._observers @@ -175,6 +181,8 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): return self._result is not None and self._result[0] is True def get_result(self) -> Union[_T, Failure]: + if self._result is None: + raise ValueError(f"{self!r} has no result yet") return self._result[1] def __getattr__(self, name: str) -> Any: -- cgit 1.5.1 From 0752ab7a3621b90073f9332fbfdc8afe16a3be01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2022 17:57:27 +0000 Subject: Reduce to-device queries for /sync. (#12163) --- changelog.d/12163.misc | 1 + synapse/storage/databases/main/deviceinbox.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/12163.misc diff --git a/changelog.d/12163.misc b/changelog.d/12163.misc new file mode 100644 index 0000000000..13de0895f5 --- /dev/null +++ b/changelog.d/12163.misc @@ -0,0 +1 @@ +Reduce number of DB queries made during processing of `/sync`. diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 1392363de1..b4a1b041b1 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -298,6 +298,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): # This user has new messages sent to them. Query messages for them user_ids_to_query.add(user_id) + if not user_ids_to_query: + return {}, to_stream_id + def get_device_messages_txn(txn: LoggingTransaction): # Build a query to select messages from any of the given devices that # are between the given stream id bounds. 
-- cgit 1.5.1 From d2ef1a79cf942b2f8c1a4724eb0c4bca0b60edbe Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 22:40:24 +0000 Subject: Relax version guard for packaging (#12166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It’s just occurred to me that #12088 pulled in the “packaging” package (~=21.3). I pulled in the newest version I had at the time. I only use it for packaging.requirements.Requirement, which was added in packaging 16.1: https://github.com/pypa/packaging/releases/tag/16.1 https://pkgs.org/download/python3-packaging suggests that the oldest version we care about is 17.1 in Ubuntu Bionic. So I think with this bound we're hunky dory. --- changelog.d/12166.misc | 1 + synapse/python_dependencies.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12166.misc diff --git a/changelog.d/12166.misc b/changelog.d/12166.misc new file mode 100644 index 0000000000..24b4a7c7de --- /dev/null +++ b/changelog.d/12166.misc @@ -0,0 +1 @@ +Relax the version guard for "packaging" added in #12088. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 8f48a33936..b40a7bbb76 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -83,8 +83,8 @@ REQUIREMENTS = [ # ijson 3.1.4 fixes a bug with "." in property names "ijson>=3.1.4", "matrix-common~=1.1.0", - # For runtime introspection of our dependencies - "packaging~=21.3", + # We need packaging.requirements.Requirement, added in 16.1. + "packaging>=16.1", ] CONDITIONAL_REQUIREMENTS = { -- cgit 1.5.1 From 0211f18d65b20c9bd77e64f296f8790f4267cf28 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 7 Mar 2022 12:24:06 +0000 Subject: Switch the `tests-done` job to an Action (#12161) I've factored it out for easier use in other workflows. --- .github/workflows/tests.yml | 30 +++++++++--------------------- changelog.d/12161.misc | 1 + 2 files changed, 10 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12161.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3bce95b0e0..613a773775 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -388,34 +388,22 @@ jobs: tests-done: if: ${{ always() }} needs: + - check-sampleconfig - lint - lint-crlf - lint-newsfile - trial - trial-olddeps - sytest + - export-data - portdb - complement runs-on: ubuntu-latest steps: - - name: Set build result - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - # the `jq` incantation dumps out a series of "<job> <result>" lines. - # we set it to an intermediate variable to avoid a pipe, which makes it - # hard to set $rc. - run: | - rc=0 - results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT) - while read job result ; do - # The newsfile lint may be skipped on non PR builds - if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then - continue - fi - - if [ "$result" != "success" ]; then - echo "::set-failed ::Job $job returned $result" - rc=1 - fi - done <<< $results - exit $rc + - uses: matrix-org/done-action@v2 + with: + needs: ${{ toJSON(needs) }} + + # The newsfile lint may be skipped on non PR builds + skippable: + lint-newsfile diff --git a/changelog.d/12161.misc b/changelog.d/12161.misc new file mode 100644 index 0000000000..43eff08d46 --- /dev/null +++ b/changelog.d/12161.misc @@ -0,0 +1 @@ +Use a prebuilt Action for the `tests-done` CI job.
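For context on the `packaging` change above: the runtime dependency checks only need requirement parsing and specifier matching. A minimal sketch of that usage, assuming nothing beyond the public `packaging` API (the `dummypkg` name is invented for illustration):

```python
from packaging.requirements import Requirement

# Parse a PEP 508 requirement string and test candidate versions against
# its specifier. Requirement was added in packaging 16.1, hence the
# relaxed ">=16.1" lower bound in the diff above.
req = Requirement("dummypkg>=1,<2")  # hypothetical dependency

print(req.name)                       # dummypkg
print(req.specifier.contains("1.4"))  # True
print(req.specifier.contains("2.0"))  # False
```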
-- cgit 1.5.1 From f63bedef07360216a8de71dc38f00f1aea503903 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 7 Mar 2022 09:00:05 -0500 Subject: Invalidate caches when an event with a relation is redacted. (#12121) The caches for the target of the relation must be cleared so that the bundled aggregations are re-calculated after the redaction is processed. --- changelog.d/12113.bugfix | 1 + changelog.d/12113.misc | 1 - changelog.d/12121.bugfix | 1 + synapse/storage/databases/main/cache.py | 2 + synapse/storage/databases/main/events.py | 38 +++++- tests/rest/client/test_relations.py | 207 ++++++++++++++++++++++++------- 6 files changed, 202 insertions(+), 48 deletions(-) create mode 100644 changelog.d/12113.bugfix delete mode 100644 changelog.d/12113.misc create mode 100644 changelog.d/12121.bugfix diff --git a/changelog.d/12113.bugfix b/changelog.d/12113.bugfix new file mode 100644 index 0000000000..df9b0dc413 --- /dev/null +++ b/changelog.d/12113.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12113.misc b/changelog.d/12113.misc deleted file mode 100644 index 102e064053..0000000000 --- a/changelog.d/12113.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the tests for event relations. diff --git a/changelog.d/12121.bugfix b/changelog.d/12121.bugfix new file mode 100644 index 0000000000..df9b0dc413 --- /dev/null +++ b/changelog.d/12121.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index c428dd5596..abd54c7dc7 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -200,6 +200,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self.get_relations_for_event.invalidate((relates_to,)) self.get_aggregation_groups_for_event.invalidate((relates_to,)) self.get_applicable_edit.invalidate((relates_to,)) + self.get_thread_summary.invalidate((relates_to,)) + self.get_thread_participated.invalidate((relates_to,)) async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): """Invalidates the cache and adds it to the cache stream so slaves diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ca2a9ba9d1..1dc83aa5e3 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1518,7 +1518,7 @@ class PersistEventsStore: ) # Remove from relations table. - self._handle_redaction(txn, event.redacts) + self._handle_redact_relations(txn, event.redacts) # Update the event_forward_extremities, event_backward_extremities and # event_edges tables. @@ -1943,15 +1943,43 @@ class PersistEventsStore: txn.execute(sql, (batch_id,)) - def _handle_redaction(self, txn, redacted_event_id): - """Handles receiving a redaction and checking whether we need to remove - any redacted relations from the database. + def _handle_redact_relations( + self, txn: LoggingTransaction, redacted_event_id: str + ) -> None: + """Handles receiving a redaction and checking whether the redacted event + has any relations which must be removed from the database. Args: txn - redacted_event_id (str): The event that was redacted. + redacted_event_id: The event that was redacted. """ + # Fetch the current relation of the event being redacted. 
+ redacted_relates_to = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_relations", + keyvalues={"event_id": redacted_event_id}, + retcol="relates_to_id", + allow_none=True, + ) + # Any relation information for the related event must be cleared. + if redacted_relates_to is not None: + self.store._invalidate_cache_and_stream( + txn, self.store.get_relations_for_event, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_applicable_edit, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_thread_summary, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_thread_participated, (redacted_relates_to,) + ) + self.db_pool.simple_delete_txn( txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 274f9c44c1..a40a5de399 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1273,7 +1273,21 @@ class RelationsTestCase(BaseRelationsTestCase): class RelationRedactionTestCase(BaseRelationsTestCase): - """Test the behaviour of relations when the parent or child event is redacted.""" + """ + Test the behaviour of relations when the parent or child event is redacted. + + The behaviour of each relation type is subtly different which causes the tests + to be a bit repetitive, they follow a naming scheme of: + + test_redact_(relation|parent)_{relation_type} + + The first bit of "relation" means that the event with the relation defined + on it (the child event) is to be redacted. A "parent" means that the target + of the relation (the parent event) is to be redacted. + + The relation_type describes which type of relation is under test (i.e. it is + related to the value of rel_type in the event content). + """ def _redact(self, event_id: str) -> None: channel = self.make_request( @@ -1284,9 +1298,53 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ) self.assertEqual(200, channel.code, channel.json_body) + def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: + """ + Makes requests and ensures they result in a 200 response, returns a + tuple of results: + + 1. `/relations` -> Returns a list of event IDs. + 2. `/event` -> Returns the response's m.relations field (from unsigned), + if it exists. + """ + + # Request the relations of the event. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] + + # Fetch the bundled aggregations of the event. 
+ channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) + + return event_ids, bundled_relations + + def _get_aggregations(self) -> List[JsonDict]: + """Request /aggregations on the parent ID and includes the returned chunk.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + return channel.json_body["chunk"] + def test_redact_relation_annotation(self) -> None: - """Test that annotations of an event are properly handled after the + """ + Test that annotations of an event are properly handled after the annotation is redacted. + + The redacted relation should not be included in bundled aggregations or + the response to relations. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) @@ -1296,24 +1354,97 @@ class RelationRedactionTestCase(BaseRelationsTestCase): RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) self.assertEqual(200, channel.code, channel.json_body) + unredacted_event_id = channel.json_body["event_id"] + + # Both relations should exist. + event_ids, relations = self._make_relation_requests() + self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]}, + ) + + # Both relations appear in the aggregation. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 2}]) # Redact one of the reactions. self._redact(to_redact_event_id) - # Ensure that the aggregations are correct. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, + # The unredacted relation should still exist. + event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [unredacted_event_id]) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + ) + + # The unredacted aggregation should still exist. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}]) + + @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + def test_redact_relation_thread(self) -> None: + """ + Test that thread replies are properly handled after the thread reply redacted. + + The redacted event should not be included in bundled aggregations or + the response to relations. + """ + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 1", "msgtype": "m.text"}, ) self.assertEqual(200, channel.code, channel.json_body) + unredacted_event_id = channel.json_body["event_id"] + # Note that the *last* event in the thread is redacted, as that gets + # included in the bundled aggregation. + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 2", "msgtype": "m.text"}, + ) + self.assertEqual(200, channel.code, channel.json_body) + to_redact_event_id = channel.json_body["event_id"] + + # Both relations exist. 
+ event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id]) + self.assertDictContainsSubset( + { + "count": 2, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + # And the latest event returned is the event that will be redacted. self.assertEqual( - channel.json_body, - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + to_redact_event_id, ) - def test_redact_relation_edit(self) -> None: + # Redact one of the reactions. + self._redact(to_redact_event_id) + + # The unredacted relation should still exist. + event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [unredacted_event_id]) + self.assertDictContainsSubset( + { + "count": 1, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + # And the latest event is now the unredacted event. + self.assertEqual( + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + unredacted_event_id, + ) + + def test_redact_parent_edit(self) -> None: """Test that edits of an event are redacted when the original event is redacted. """ @@ -1331,34 +1462,19 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}/m.replace/m.room.message", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 1) + self.assertIn(RelationTypes.REPLACE, relations) # Redact the original event self._redact(self.parent_id) - # Try to check for remaining m.replace relations - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}/m.replace/m.room.message", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that no relations are returned - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # The relations are not returned. + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 0) + self.assertEqual(relations, {}) - def test_redact_parent(self) -> None: + def test_redact_parent_annotation(self) -> None: """Test that annotations of an event are redacted when the original event is redacted. """ @@ -1366,16 +1482,23 @@ class RelationRedactionTestCase(BaseRelationsTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) + # The relations should exist. + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 1) + self.assertIn(RelationTypes.ANNOTATION, relations) + + # The aggregation should exist. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "👍", "count": 1}]) + # Redact the original event. 
self._redact(self.parent_id) - # Check that aggregations returns zero - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}/m.annotation/m.reaction", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + # The relations are not returned. + event_ids, relations = self._make_relation_requests() + self.assertEqual(event_ids, []) + self.assertEqual(relations, {}) - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # There's nothing to aggregate. + chunk = self._get_aggregations() + self.assertEqual(chunk, []) -- cgit 1.5.1 From 26211fec24d8d0a967de33147e148166359ec8cb Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 7 Mar 2022 09:44:33 -0800 Subject: Fix a bug in background updates wherein background updates are never run using the default batch size (#12157) --- changelog.d/12157.bugfix | 1 + synapse/storage/background_updates.py | 8 +++++--- tests/rest/admin/test_background_updates.py | 18 ++++++++---------- tests/storage/test_background_update.py | 4 ++-- 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/12157.bugfix diff --git a/changelog.d/12157.bugfix b/changelog.d/12157.bugfix new file mode 100644 index 0000000000..c3d2e700bb --- /dev/null +++ b/changelog.d/12157.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index d64910aded..4acc2c997d 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -102,10 +102,12 @@ class BackgroundUpdatePerformance: Returns: A duration in ms as a float """ - if self.avg_duration_ms == 0: - return 0 - elif self.total_item_count == 0: + # We want to return None if this is the first background update item + if self.total_item_count == 0: return None + # Avoid dividing by zero + elif self.avg_duration_ms == 0: + return 0 else: # Use the exponential moving average so that we can adapt to # changes in how long the update process takes. 
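To make the ordering bug in `average_items_per_ms` concrete, here is a condensed model of the fixed logic. This is a sketch only, not the actual Synapse method (the real implementation tracks exponential moving averages of duration and item count):

```python
from typing import Optional

def average_items_per_ms(total_item_count: int, avg_duration_ms: float) -> Optional[float]:
    # First run: nothing has been processed yet, so there is no timing data.
    # Returning None tells the caller to fall back to the default batch size.
    # The old code tested avg_duration_ms == 0 first; since both counters
    # start at zero, it returned 0 here and the default was never used.
    if total_item_count == 0:
        return None
    # Once items exist, guard against dividing by zero.
    if avg_duration_ms == 0:
        return 0.0
    return total_item_count / avg_duration_ms
```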
diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index fb36aa9940..becec84524 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -155,10 +155,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -210,10 +210,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -239,10 +239,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -278,11 +278,9 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.05263157894736842, "total_duration_ms": 2000.0, - "total_item_count": ( - 2 * BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE - ), + "total_item_count": (110), } }, "enabled": True, diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 39dcc094bd..9fdf54ea31 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -66,13 +66,13 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), - by=0.01, + by=0.02, ) self.assertFalse(res) # on the first call, we should get run with the default background update size self.update_handler.assert_called_once_with( - {"my_key": 1}, self.updates.MINIMUM_BACKGROUND_BATCH_SIZE + {"my_key": 1}, self.updates.DEFAULT_BACKGROUND_BATCH_SIZE ) # second step: complete the update -- cgit 1.5.1 From 2eef234ae367657d4fe5cb0bef6bda67e97b7e4d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Mar 2022 10:47:28 +0000 Subject: Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. (#12177) * Add failing test to characterise the regression #12176 * Permit pre-release versions of specified packages * Newsfile (bugfix) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/12177.bugfix | 1 + synapse/util/check_dependencies.py | 3 ++- tests/util/test_check_dependencies.py | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12177.bugfix diff --git a/changelog.d/12177.bugfix b/changelog.d/12177.bugfix new file mode 100644 index 0000000000..3f2852f345 --- /dev/null +++ b/changelog.d/12177.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. 
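The root cause of the rc-version bug fixed below, in isolation: `packaging` specifiers exclude pre-release versions by default unless they are explicitly allowed. A minimal illustration of plain `packaging` behaviour (not Synapse code):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1")

# A pre-release does not satisfy a plain specifier by default...
print(spec.contains("1.2.3rc4"))                    # False
# ...unless pre-releases are opted into, which is what the fix does.
print(spec.contains("1.2.3rc4", prereleases=True))  # True
```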
\ No newline at end of file diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 39b0a91db3..12cd804939 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -163,7 +163,8 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled.append(requirement.name) errors.append(_not_installed(requirement, extra)) else: - if not requirement.specifier.contains(dist.version): + # We specify prereleases=True to allow prereleases such as RCs. + if not requirement.specifier.contains(dist.version, prereleases=True): deps_unfulfilled.append(requirement.name) errors.append(_incorrect_version(requirement, dist.version, extra)) diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index a91c33272f..38e9f58ac6 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -27,7 +27,9 @@ class DummyDistribution(metadata.Distribution): old = DummyDistribution("0.1.2") +old_release_candidate = DummyDistribution("0.1.2rc3") new = DummyDistribution("1.2.3") +new_release_candidate = DummyDistribution("1.2.3rc4") # could probably use stdlib TestCase --- no need for twisted here @@ -110,3 +112,20 @@ class TestDependencyChecker(TestCase): with self.mock_installed_package(new): # should not raise check_requirements("cool-extra") + + def test_release_candidates_satisfy_dependency(self) -> None: + """ + Tests that release candidates count as far as satisfying a dependency + is concerned. + (Regression test, see #12176.) + """ + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1"], + ): + with self.mock_installed_package(old_release_candidate): + self.assertRaises(DependencyException, check_requirements) + + with self.mock_installed_package(new_release_candidate): + # should not raise + check_requirements() -- cgit 1.5.1 From ea992adf867c0c74dccfd6a40e0f73933ccf2899 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 10:55:26 +0000 Subject: 1.54.0 --- CHANGES.md | 18 ++++++++++++++++++ changelog.d/12127.misc | 1 - changelog.d/12129.misc | 1 - changelog.d/12141.bugfix | 1 - changelog.d/12166.misc | 1 - changelog.d/12177.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 8 files changed, 25 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/12127.misc delete mode 100644 changelog.d/12129.misc delete mode 100644 changelog.d/12141.bugfix delete mode 100644 changelog.d/12166.misc delete mode 100644 changelog.d/12177.bugfix diff --git a/CHANGES.md b/CHANGES.md index 0a87f5cd42..9d27cd35aa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,21 @@ +Synapse 1.54.0 (2022-03-08) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) +- Fix a bug introduced in Synapse 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. ([\#12177](https://github.com/matrix-org/synapse/issues/12177)) + + +Internal Changes +---------------- + +- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) +- Inspect application dependencies using `importlib.metadata` or its backport. 
([\#12129](https://github.com/matrix-org/synapse/issues/12129)) +- Relax the version guard for "packaging" added in #12088. ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) + + Synapse 1.54.0rc1 (2022-03-02) ============================== diff --git a/changelog.d/12127.misc b/changelog.d/12127.misc deleted file mode 100644 index e42eca63a8..0000000000 --- a/changelog.d/12127.misc +++ /dev/null @@ -1 +0,0 @@ -Update release script to insert the previous version when writing "No significant changes" line in the changelog. diff --git a/changelog.d/12129.misc b/changelog.d/12129.misc deleted file mode 100644 index ce4213650c..0000000000 --- a/changelog.d/12129.misc +++ /dev/null @@ -1 +0,0 @@ -Inspect application dependencies using `importlib.metadata` or its backport. \ No newline at end of file diff --git a/changelog.d/12141.bugfix b/changelog.d/12141.bugfix deleted file mode 100644 index 03a2507e2c..0000000000 --- a/changelog.d/12141.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. diff --git a/changelog.d/12166.misc b/changelog.d/12166.misc deleted file mode 100644 index 24b4a7c7de..0000000000 --- a/changelog.d/12166.misc +++ /dev/null @@ -1 +0,0 @@ -Relax the version guard for "packaging" added in #12088. diff --git a/changelog.d/12177.bugfix b/changelog.d/12177.bugfix deleted file mode 100644 index 3f2852f345..0000000000 --- a/changelog.d/12177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index df3db85b8e..02136a0d60 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.54.0) stable; urgency=medium + + * New synapse release 1.54.0. + + -- Synapse Packaging team Tue, 08 Mar 2022 10:54:52 +0000 + matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium * New synapse release 1.54.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index b21e1ed0f3..c6727024f0 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ try: except ImportError: pass -__version__ = "1.54.0rc1" +__version__ = "1.54.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.5.1 From 094802e04e6dc174040e2b050419df124cf2ba00 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 10:58:10 +0000 Subject: Shift up warning about Mjolnir --- CHANGES.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9d27cd35aa..d9ea9fb46c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,10 @@ Synapse 1.54.0 (2022-03-08) =========================== +Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. +Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. + + Bugfixes -------- @@ -19,9 +23,6 @@ Internal Changes Synapse 1.54.0rc1 (2022-03-02) ============================== -Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. -Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. 
- Features -------- -- cgit 1.5.1 From d8bab6793c75774db4bde8aeec6897b607e08799 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 07:26:05 -0500 Subject: Fix incorrect type hints for txredis. (#12042) Some properties were marked as RedisProtocol instead of ConnectionHandler, which wraps RedisProtocol instance(s). --- changelog.d/12042.misc | 1 + stubs/txredisapi.pyi | 9 ++++++--- synapse/replication/tcp/external_cache.py | 4 ++-- synapse/replication/tcp/redis.py | 6 +++--- synapse/server.py | 4 ++-- 5 files changed, 14 insertions(+), 10 deletions(-) create mode 100644 changelog.d/12042.misc diff --git a/changelog.d/12042.misc b/changelog.d/12042.misc new file mode 100644 index 0000000000..6ecdc96021 --- /dev/null +++ b/changelog.d/12042.misc @@ -0,0 +1 @@ +Correct type hints for txredis. diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index 429234d7ae..2d8ca018fb 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -20,7 +20,7 @@ from twisted.internet import protocol from twisted.internet.defer import Deferred class RedisProtocol(protocol.Protocol): - def publish(self, channel: str, message: bytes): ... + def publish(self, channel: str, message: bytes) -> "Deferred[None]": ... def ping(self) -> "Deferred[None]": ... def set( self, @@ -52,11 +52,14 @@ def lazyConnection( convertNumbers: bool = ..., ) -> RedisProtocol: ... -class ConnectionHandler: ... +# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies +# most methods to it via ConnectionHandler.__getattr__. +class ConnectionHandler(RedisProtocol): + def disconnect(self) -> "Deferred[None]": ... class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool - handler: RedisProtocol + handler: ConnectionHandler pool: List[RedisProtocol] replyTimeout: Optional[int] def __init__( diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index aaf91e5e02..bf7d017968 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -21,7 +21,7 @@ from synapse.logging.context import make_deferred_yieldable from synapse.util import json_decoder, json_encoder if TYPE_CHECKING: - from txredisapi import RedisProtocol + from txredisapi import ConnectionHandler from synapse.server import HomeServer @@ -63,7 +63,7 @@ class ExternalCache: def __init__(self, hs: "HomeServer"): if hs.config.redis.redis_enabled: self._redis_connection: Optional[ - "RedisProtocol" + "ConnectionHandler" ] = hs.get_outbound_redis_connection() else: self._redis_connection = None diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 3170f7c59b..b84e572da1 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -93,7 +93,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_handler: "ReplicationCommandHandler" synapse_stream_name: str - synapse_outbound_redis_connection: txredisapi.RedisProtocol + synapse_outbound_redis_connection: txredisapi.ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) @@ -313,7 +313,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): protocol = RedisSubscriber def __init__( - self, hs: "HomeServer", outbound_redis_connection: txredisapi.RedisProtocol + self, hs: "HomeServer", outbound_redis_connection: txredisapi.ConnectionHandler ): super().__init__( @@ -353,7 +353,7 @@ def lazyConnection( reconnect: bool = True, password: Optional[str] = None, 
replyTimeout: int = 30, -) -> txredisapi.RedisProtocol: +) -> txredisapi.ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the connections is lost. """ diff --git a/synapse/server.py b/synapse/server.py index b5e2a319bc..46a64418ea 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -145,7 +145,7 @@ from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) if TYPE_CHECKING: - from txredisapi import RedisProtocol + from txredisapi import ConnectionHandler from synapse.handlers.oidc import OidcHandler from synapse.handlers.saml import SamlHandler @@ -807,7 +807,7 @@ class HomeServer(metaclass=abc.ABCMeta): return AccountHandler(self) @cache_in_self - def get_outbound_redis_connection(self) -> "RedisProtocol": + def get_outbound_redis_connection(self) -> "ConnectionHandler": """ The Redis connection used for replication. -- cgit 1.5.1 From ca9234a9eba4fba02d8d50e5d5eff079bfaf0ebd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 08:09:11 -0500 Subject: Do not return allowed_room_ids from /hierarchy response. (#12175) This field is only to be used in the Server-Server API, and not the Client-Server API, but was being leaked when a federation response was used in the /hierarchy API. --- changelog.d/12175.bugfix | 1 + synapse/handlers/room_summary.py | 15 +++++++++++++-- tests/handlers/test_room_summary.py | 3 +++ 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12175.bugfix diff --git a/changelog.d/12175.bugfix b/changelog.d/12175.bugfix new file mode 100644 index 0000000000..881cb9b76c --- /dev/null +++ b/changelog.d/12175.bugfix @@ -0,0 +1 @@ +Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 3979cbba71..486145f48a 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -295,7 +295,7 @@ class RoomSummaryHandler: # inaccessible to the requesting user. if room_entry: # Add the room (including the stripped m.space.child events). - rooms_result.append(room_entry.as_json()) + rooms_result.append(room_entry.as_json(for_client=True)) # If this room is not at the max-depth, check if there are any # children to process. @@ -843,14 +843,25 @@ class _RoomEntry: # This may not include all children. children_state_events: Sequence[JsonDict] = () - def as_json(self) -> JsonDict: + def as_json(self, for_client: bool = False) -> JsonDict: """ Returns a JSON dictionary suitable for the room hierarchy endpoint. It returns the room summary including the stripped m.space.child events as a sub-key. + + Args: + for_client: If true, any server-server only fields are stripped from + the result. + """ result = dict(self.room) + + # Before returning to the client, remove the allowed_room_ids key, if it + # exists. + if for_client: + result.pop("allowed_room_ids", False) + result["children_state"] = self.children_state_events return result diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index cff07a8973..d37292ce13 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -172,6 +172,9 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): result_room_ids = [] result_children_ids = [] for result_room in result["rooms"]: + # Ensure federation results are not leaking over the client-server API. 
+ self.assertNotIn("allowed_room_ids", result_room) + result_room_ids.append(result_room["room_id"]) result_children_ids.append( [ -- cgit 1.5.1 From 2ce27a24fe29104ca54e0a879c7ad37d88a3fc69 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Mar 2022 13:23:18 +0000 Subject: Add experimental environment variable to enable asyncio reactor (#12135) --- changelog.d/12135.feature | 1 + mypy.ini | 3 +++ synapse/__init__.py | 21 +++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 changelog.d/12135.feature diff --git a/changelog.d/12135.feature b/changelog.d/12135.feature new file mode 100644 index 0000000000..b337f51730 --- /dev/null +++ b/changelog.d/12135.feature @@ -0,0 +1 @@ +Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. diff --git a/mypy.ini b/mypy.ini index 481e8a5366..c8390ddba9 100644 --- a/mypy.ini +++ b/mypy.ini @@ -353,3 +353,6 @@ ignore_missing_imports = True [mypy-zope] ignore_missing_imports = True + +[mypy-incremental.*] +ignore_missing_imports = True diff --git a/synapse/__init__.py b/synapse/__init__.py index b21e1ed0f3..674acc7135 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -25,6 +25,27 @@ if sys.version_info < (3, 7): print("Synapse requires Python 3.7 or above.") sys.exit(1) +# Allow using the asyncio reactor via env var. +if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)): + try: + from incremental import Version + + import twisted + + # We need a bugfix that is included in Twisted 21.2.0: + # https://twistedmatrix.com/trac/ticket/9787 + if twisted.version < Version("Twisted", 21, 2, 0): + print("Using asyncio reactor requires Twisted>=21.2.0") + sys.exit(1) + + import asyncio + + from twisted.internet import asyncioreactor + + asyncioreactor.install(asyncio.get_event_loop()) + except ImportError: + pass + # Twisted and canonicaljson will fail to import when this file is executed to # get the __version__ during a fresh install. That's OK and subsequent calls to # actually start Synapse will import these libraries fine. -- cgit 1.5.1 From 65e02b3e6d286a3f04d71286395523d9b2feeee9 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 14:00:16 +0000 Subject: Tweak changelog formatting --- CHANGES.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d9ea9fb46c..1c8a52cf1f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,15 +9,14 @@ Bugfixes -------- - Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) -- Fix a bug introduced in Synapse 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. ([\#12177](https://github.com/matrix-org/synapse/issues/12177)) +- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177)) Internal Changes ---------------- - Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) -- Inspect application dependencies using `importlib.metadata` or its backport. 
([\#12129](https://github.com/matrix-org/synapse/issues/12129)) -- Relax the version guard for "packaging" added in #12088. ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) +- Relax the version guard for "packaging" added in ([\#12088](https://github.com/matrix-org/synapse/issues/12088)). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) Synapse 1.54.0rc1 (2022-03-02) -- cgit 1.5.1 From b1989ced00cc0bc6214bfd1a393c7e8f8eda660c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 14:01:19 +0000 Subject: Fix silly markdown typo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 1c8a52cf1f..ef671e73f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -16,7 +16,7 @@ Internal Changes ---------------- - Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) -- Relax the version guard for "packaging" added in ([\#12088](https://github.com/matrix-org/synapse/issues/12088)). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) +- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) Synapse 1.54.0rc1 (2022-03-02) -- cgit 1.5.1 From bfa7d6b03588ec3eca488f404d8fcac38f9e6427 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Mar 2022 15:11:50 +0000 Subject: Fix CI not attaching source distributions and wheels to the GitHub releases. (#12131) --- .github/workflows/release-artifacts.yml | 3 ++- changelog.d/12131.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12131.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 65ea761ad7..ed4fc6179d 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -112,7 +112,8 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: files: | - python-dist/* + Sdist/* + Wheel/* debs.tar.xz # if it's not already published, keep the release as a draft. draft: true diff --git a/changelog.d/12131.misc b/changelog.d/12131.misc new file mode 100644 index 0000000000..8ef23c22d5 --- /dev/null +++ b/changelog.d/12131.misc @@ -0,0 +1 @@ +Fix CI not attaching source distributions and wheels to the GitHub releases. \ No newline at end of file -- cgit 1.5.1 From 562718278847375636ead2ed3afcc9d9d482ef96 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 8 Mar 2022 15:58:14 +0000 Subject: Use `ParamSpec` in type hints for `synapse.logging.context` (#12150) Signed-off-by: Sean Quah --- changelog.d/12150.misc | 1 + synapse/handlers/initial_sync.py | 5 ++-- synapse/logging/context.py | 44 +++++++++++++++++-------------- synapse/python_dependencies.py | 3 ++- synapse/rest/media/v1/storage_provider.py | 9 +++++-- 5 files changed, 37 insertions(+), 25 deletions(-) create mode 100644 changelog.d/12150.misc diff --git a/changelog.d/12150.misc b/changelog.d/12150.misc new file mode 100644 index 0000000000..2d2706dac7 --- /dev/null +++ b/changelog.d/12150.misc @@ -0,0 +1 @@ +Use `ParamSpec` in type hints for `synapse.logging.context`. 
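The diff below applies the `ParamSpec` pattern throughout `synapse.logging.context`; reduced to a self-contained sketch it looks like this (`log_calls` and `add` are invented names for illustration):

```python
from typing import Callable, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def log_calls(f: Callable[P, R]) -> Callable[P, R]:
    # P captures the wrapped callable's full parameter list, so the wrapper
    # keeps the exact signature of `f` for type checkers instead of
    # degrading to Callable[..., R].
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)

    return wrapper

@log_calls
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)  # accepted
# add(1, "2")  # rejected by mypy: arguments are checked against add's real signature
```

This is the same trick that lets `run_in_background(f, *args, **kwargs)` type-check its callers' arguments against `f` in the diff that follows.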
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 316cfae24f..a7db8feb57 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -153,8 +153,9 @@ class InitialSyncHandler: public_room_ids = await self.store.get_public_room_ids() - limit = pagin_config.limit - if limit is None: + if pagin_config.limit is not None: + limit = pagin_config.limit + else: limit = 10 serializer_options = SerializeEventConfig(as_client_event=as_client_event) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index c31c2960ad..88cd8a9e1c 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -29,7 +29,6 @@ import warnings from types import TracebackType from typing import ( TYPE_CHECKING, - Any, Awaitable, Callable, Optional, @@ -41,7 +40,7 @@ from typing import ( ) import attr -from typing_extensions import Literal +from typing_extensions import Literal, ParamSpec from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool @@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext: ) +P = ParamSpec("P") R = TypeVar("R") @overload def preserve_fn( # type: ignore[misc] - f: Callable[..., Awaitable[R]], -) -> Callable[..., "defer.Deferred[R]"]: + f: Callable[P, Awaitable[R]], +) -> Callable[P, "defer.Deferred[R]"]: # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" ... @overload -def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]: +def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]: ... def preserve_fn( f: Union[ - Callable[..., R], - Callable[..., Awaitable[R]], + Callable[P, R], + Callable[P, Awaitable[R]], ] -) -> Callable[..., "defer.Deferred[R]"]: +) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" - def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]": + def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]": return run_in_background(f, *args, **kwargs) return g @@ -752,7 +752,7 @@ def preserve_fn( @overload def run_in_background( # type: ignore[misc] - f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any + f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" @@ -761,18 +761,22 @@ def run_in_background( # type: ignore[misc] @overload def run_in_background( - f: Callable[..., R], *args: Any, **kwargs: Any + f: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": ... -def run_in_background( +def run_in_background( # type: ignore[misc] + # The `type: ignore[misc]` above suppresses + # "Overloaded function implementation does not accept all possible arguments of signature 1" + # "Overloaded function implementation does not accept all possible arguments of signature 2" + # which seems like a bug in mypy. 
f: Union[ - Callable[..., R], - Callable[..., Awaitable[R]], + Callable[P, R], + Callable[P, Awaitable[R]], ], - *args: Any, - **kwargs: Any, + *args: P.args, + **kwargs: P.kwargs, ) -> "defer.Deferred[R]": """Calls a function, ensuring that the current context is restored after return from the function, and that the sentinel context is set once the @@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT: def defer_to_thread( - reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any + reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": """ Calls the function `f` using a thread from the reactor's default threadpool and @@ -908,9 +912,9 @@ def defer_to_thread( def defer_to_threadpool( reactor: "ISynapseReactor", threadpool: ThreadPool, - f: Callable[..., R], - *args: Any, - **kwargs: Any, + f: Callable[P, R], + *args: P.args, + **kwargs: P.kwargs, ) -> "defer.Deferred[R]": """ A wrapper for twisted.internet.threads.deferToThreadpool, which handles diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index b40a7bbb76..1dd39f06cf 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -76,7 +76,8 @@ REQUIREMENTS = [ "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", - "typing-extensions>=3.7.4", + # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0. + "typing-extensions>=3.10.0", # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. "cryptography>=3.4.7", diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 18bf977d3d..1c9b71d69c 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -16,7 +16,7 @@ import abc import logging import os import shutil -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Callable, Optional from synapse.config._base import Config from synapse.logging.context import defer_to_thread, run_in_background @@ -150,8 +150,13 @@ class FileStorageProviderBackend(StorageProvider): dirname = os.path.dirname(backup_fname) os.makedirs(dirname, exist_ok=True) + # mypy needs help inferring the type of the second parameter, which is generic + shutil_copyfile: Callable[[str, str], str] = shutil.copyfile await defer_to_thread( - self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname + self.hs.get_reactor(), + shutil_copyfile, + primary_fname, + backup_fname, ) async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: -- cgit 1.5.1 From 9a0172d49f3da46c615304c7df3353494500fd49 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 15:02:59 -0500 Subject: Clean-up demo scripts & documentation (#12143) * Rewrites the demo documentation to be clearer, accurate, and moves it to our documentation tree. * Improvements to the demo scripts: * `clean.sh` now runs `stop.sh` first to avoid zombie processes. * Uses more modern Synapse configuration (and removes some obsolete configuration). * Consistently use the HTTP ports for server name, etc. * Remove the `demo/etc` directory and place everything into the `demo/808x` directories. 
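Once `demo/start.sh` has run, a quick way to confirm all three homeservers are answering is to query the client API on each HTTP port. A hypothetical smoke test, assuming the third-party `requests` package (not something the demo itself uses):

```python
import requests

# The demo homeservers listen for HTTP on ports 8080, 8081 and 8082.
for port in (8080, 8081, 8082):
    resp = requests.get(
        f"http://localhost:{port}/_matrix/client/versions", timeout=5
    )
    resp.raise_for_status()  # any non-2xx response means the server is unhealthy
    print(port, resp.json()["versions"])
```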
--- README.rst | 3 ++ changelog.d/12143.doc | 1 + demo/.gitignore | 11 +++----- demo/README | 26 ------------------ demo/clean.sh | 3 ++ demo/start.sh | 71 +++++++++++++++++++++++------------------------- docs/SUMMARY.md | 1 + docs/development/demo.md | 41 ++++++++++++++++++++++++++++ docs/federate.md | 3 +- 9 files changed, 89 insertions(+), 71 deletions(-) create mode 100644 changelog.d/12143.doc delete mode 100644 demo/README create mode 100644 docs/development/demo.md diff --git a/README.rst b/README.rst index 4281c87d1f..595fb5ff62 100644 --- a/README.rst +++ b/README.rst @@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports (to stop, you can use `./demo/stop.sh`) +See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html) +for more information. + If you just want to start a single instance of the app and run it directly:: # Create the homeserver.yaml config once diff --git a/changelog.d/12143.doc b/changelog.d/12143.doc new file mode 100644 index 0000000000..4b9db74b1f --- /dev/null +++ b/changelog.d/12143.doc @@ -0,0 +1 @@ +Improve documentation for demo scripts. diff --git a/demo/.gitignore b/demo/.gitignore index 4d12712343..5663aba8e7 100644 --- a/demo/.gitignore +++ b/demo/.gitignore @@ -1,7 +1,4 @@ -*.db -*.log -*.log.* -*.pid - -/media_store.* -/etc +# Ignore all the temporary files from the demo servers. +8080/ +8081/ +8082/ diff --git a/demo/README b/demo/README deleted file mode 100644 index a5a95bd196..0000000000 --- a/demo/README +++ /dev/null @@ -1,26 +0,0 @@ -DO NOT USE THESE DEMO SERVERS IN PRODUCTION - -Requires you to have done: - python setup.py develop - - -The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required. - -To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs -and are configured in a highly insecure way. Do not use these configuration files in production. - -stop.sh will stop the synapse servers and the webclient. - -clean.sh will delete the databases and log files. - -To start a completely new set of servers, run: - - ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh - - -Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db} - - - -Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name. - diff --git a/demo/clean.sh b/demo/clean.sh index e9b440d90d..7f1e192021 100755 --- a/demo/clean.sh +++ b/demo/clean.sh @@ -4,6 +4,9 @@ set -e DIR="$( cd "$( dirname "$0" )" && pwd )" +# Ensure that the servers are stopped. +$DIR/stop.sh + PID_FILE="$DIR/servers.pid" if [ -f "$PID_FILE" ]; then diff --git a/demo/start.sh b/demo/start.sh index 8ffb14e30a..55e69685e3 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -6,8 +6,6 @@ CWD=$(pwd) cd "$DIR/.." || exit -mkdir -p demo/etc - PYTHONPATH=$(readlink -f "$(pwd)") export PYTHONPATH @@ -21,22 +19,26 @@ for port in 8080 8081 8082; do mkdir -p demo/$port pushd demo/$port || exit - #rm $DIR/etc/$port.config + # Generate the configuration for the homeserver at localhost:848x. 
python3 -m synapse.app.homeserver \ --generate-config \ - -H "localhost:$https_port" \ - --config-path "$DIR/etc/$port.config" \ + --server-name "localhost:$port" \ + --config-path "$port.config" \ --report-stats no - if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then - # Generate tls keys - openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix" + if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then + # Generate TLS keys. + openssl req -x509 -newkey rsa:4096 \ + -keyout "localhost:$port.tls.key" \ + -out "localhost:$port.tls.crt" \ + -days 365 -nodes -subj "/O=matrix" - # Regenerate configuration + # Add customisations to the configuration. { - printf '\n\n# Customisation made by demo/start.sh\n' + printf '\n\n# Customisation made by demo/start.sh\n\n' echo "public_baseurl: http://localhost:$port/" echo 'enable_registration: true' + echo '' # Warning, this heredoc depends on the interaction of tabs and spaces. # Please don't accidentaly bork me with your fancy settings. @@ -63,38 +65,34 @@ for port in 8080 8081 8082; do echo "${listeners}" - # Disable tls for the servers - printf '\n\n# Disable tls on the servers.' + # Disable TLS for the servers + printf '\n\n# Disable TLS for the servers.' echo '# DO NOT USE IN PRODUCTION' echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' echo 'federation_verify_certificates: false' - # Set tls paths - echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" - echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" + # Set paths for the TLS certificates. + echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\"" + echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\"" # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' echo 'trusted_key_servers:' echo ' - server_name: "matrix.org"' echo ' accept_keys_insecurely: true' - - # Reduce the blacklist - blacklist=$(cat <<-BLACK - # Set the blacklist so that it doesn't include 127.0.0.1, ::1 - federation_ip_range_blacklist: - - '10.0.0.0/8' - - '172.16.0.0/12' - - '192.168.0.0/16' - - '100.64.0.0/10' - - '169.254.0.0/16' - - 'fe80::/64' - - 'fc00::/7' - BLACK + echo '' + + # Allow the servers to communicate over localhost. + allow_list=$(cat <<-ALLOW_LIST + # Allow the servers to communicate over localhost. + ip_range_whitelist: + - '127.0.0.1/8' + - '::1/128' + ALLOW_LIST ) - echo "${blacklist}" - } >> "$DIR/etc/$port.config" + echo "${allow_list}" + } >> "$port.config" fi # Check script parameters @@ -141,19 +139,18 @@ for port in 8080 8081 8082; do burst_count: 1000 RC ) - echo "${ratelimiting}" >> "$DIR/etc/$port.config" + echo "${ratelimiting}" >> "$port.config" fi fi - if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then - echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config" - fi - if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then - echo "report_stats: false" >> "$DIR/etc/$port.config" + # Always disable reporting of stats if the option is not there. + if ! grep -F "report_stats" -q "$port.config" ; then + echo "report_stats: false" >> "$port.config" fi + # Run the homeserver in the background. 
python3 -m synapse.app.homeserver \ - --config-path "$DIR/etc/$port.config" \ + --config-path "$port.config" \ -D \ popd || exit diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ef9cabf555..21f80efc99 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -82,6 +82,7 @@ - [Release Cycle](development/releases.md) - [Git Usage](development/git.md) - [Testing]() + - [Demo scripts](development/demo.md) - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) diff --git a/docs/development/demo.md b/docs/development/demo.md new file mode 100644 index 0000000000..4277252ceb --- /dev/null +++ b/docs/development/demo.md @@ -0,0 +1,41 @@ +# Synapse demo setup + +**DO NOT USE THESE DEMO SERVERS IN PRODUCTION** + +Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies). + +The demo setup allows running three federation Synapse servers, with server +names `localhost:8080`, `localhost:8081`, and `localhost:8082`. + +You can access them via any Matrix client over HTTP at `localhost:8080`, +`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`, +`localhost:8481`, and `localhost:8482`. + +To enable the servers to communicate, self-signed SSL certificates are generated +and the servers are configured in a highly insecure way, including: + +* Not checking certificates over federation. +* Not verifying keys. + +The servers are configured to store their data under `demo/8080`, `demo/8081`, and +`demo/8082`. This includes configuration, logs, SQLite databases, and media. + +Note that when joining a public room on a different HS via "#foo:bar.net", then +you are (in the current impl) joining a room with room_id "foo". This means that +it won't work if your HS already has a room with that name. + +## Using the demo scripts + +There's three main scripts with straightforward purposes: + +* `start.sh` will start the Synapse servers, generating any missing configuration. + * This accepts a single parameter `--no-rate-limit` to "disable" rate limits + (they actually still exist, but are very high). +* `stop.sh` will stop the Synapse servers. +* `clean.sh` will delete the configuration, databases, log files, etc. + +To start a completely new set of servers, run: + +```sh +./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh +``` diff --git a/docs/federate.md b/docs/federate.md index 5107f995be..df4c87da51 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -63,4 +63,5 @@ release of Synapse. If you want to get up and running quickly with a trio of homeservers in a private federation, there is a script in the `demo` directory. This is mainly -useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/). +useful just for development purposes. See +[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html). -- cgit 1.5.1 From dc8d825ef26714f610db9c286f2f2517db064b79 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 9 Mar 2022 11:00:48 +0000 Subject: Skip attempt to get state at backwards-extremities (#12173) We don't *have* the state at a backwards-extremity, so this is never going to do anything useful. 
--- changelog.d/12173.misc | 1 + synapse/handlers/federation.py | 60 +++--------------------------------------- 2 files changed, 4 insertions(+), 57 deletions(-) create mode 100644 changelog.d/12173.misc diff --git a/changelog.d/12173.misc b/changelog.d/12173.misc new file mode 100644 index 0000000000..9f333e718a --- /dev/null +++ b/changelog.d/12173.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index eb03a5accb..db39aeabde 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -23,8 +23,6 @@ from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 -from twisted.internet import defer - from synapse import event_auth from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import ( @@ -45,11 +43,7 @@ from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict -from synapse.logging.context import ( - make_deferred_yieldable, - nested_logging_context, - preserve_fn, -) +from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, @@ -355,56 +349,8 @@ class FederationHandler: if success: return True - # Huh, well *those* domains didn't work out. Lets try some domains - # from the time. - - tried_domains = set(likely_domains) - tried_domains.add(self.server_name) - - event_ids = list(extremities.keys()) - - logger.debug("calling resolve_state_groups in _maybe_backfill") - resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) - states_list = await make_deferred_yieldable( - defer.gatherResults( - [resolve(room_id, [e]) for e in event_ids], consumeErrors=True - ) - ) - - # A map from event_id to state map of event_ids. - state_ids: Dict[str, StateMap[str]] = dict( - zip(event_ids, [s.state for s in states_list]) - ) - - state_map = await self.store.get_events( - [e_id for ids in state_ids.values() for e_id in ids.values()], - get_prev_content=False, - ) - - # A map from event_id to state map of events. - state_events: Dict[str, StateMap[EventBase]] = { - key: { - k: state_map[e_id] - for k, e_id in state_dict.items() - if e_id in state_map - } - for key, state_dict in state_ids.items() - } - - for e_id in event_ids: - likely_extremeties_domains = get_domains_from_state(state_events[e_id]) - - success = await try_backfill( - [ - dom - for dom, _ in likely_extremeties_domains - if dom not in tried_domains - ] - ) - if success: - return True - - tried_domains.update(dom for dom, _ in likely_extremeties_domains) + # TODO: we could also try servers which were previously in the room, but + # are no longer. return False -- cgit 1.5.1 From 180d8ff0d4d706344fa984abbd9ed6fa02ca13dc Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Wed, 9 Mar 2022 14:53:28 +0000 Subject: Retry some http replication failures (#12182) This allows for the target process to be down for around a minute which provides time for restarts during synapse upgrades/config updates. 
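The retry schedule is a simple exponential backoff: after each connection failure the client sleeps `2**attempts` seconds and gives up once the attempt limit is exceeded, covering 1 + 2 + 4 + 8 + 16 + 32 = 63 seconds of downtime in total. A standalone sketch of the scheme (the `call_with_retries` name and the `request` callable are illustrative, not the actual `ReplicationEndpoint` code):

```python
import time


def call_with_retries(request, max_attempts: int = 5):
    """Call `request`, retrying connection failures with exponential backoff."""
    attempts = 0
    while True:
        try:
            return request()
        except ConnectionError:
            if attempts > max_attempts:
                raise
            time.sleep(2 ** attempts)  # 1s, 2s, 4s, ... up to 32s
            attempts += 1
```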
Closes: #12178 Signed off by Nick Mills-Barrett nick@beeper.com --- changelog.d/12182.misc | 1 + synapse/replication/http/_base.py | 47 ++++++++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 11 deletions(-) create mode 100644 changelog.d/12182.misc diff --git a/changelog.d/12182.misc b/changelog.d/12182.misc new file mode 100644 index 0000000000..7e9ad2c752 --- /dev/null +++ b/changelog.d/12182.misc @@ -0,0 +1 @@ +Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 2e697c74a6..f1abb98653 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple from prometheus_client import Counter, Gauge +from twisted.internet.error import ConnectError, DNSLookupError from twisted.web.server import Request from synapse.api.errors import HttpResponseException, SynapseError @@ -87,6 +88,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): `_handle_request` must return a Deferred. RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504 is received. + RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request when + a connection error is received. + RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry when + receiving connection errors, each will backoff exponentially longer. """ NAME: str = abc.abstractproperty() # type: ignore @@ -94,6 +99,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): METHOD = "POST" CACHE = True RETRY_ON_TIMEOUT = True + RETRY_ON_CONNECT_ERROR = True + RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5 # =63s (2^6-1) def __init__(self, hs: "HomeServer"): if self.CACHE: @@ -236,18 +243,20 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): "/".join(url_args), ) + headers: Dict[bytes, List[bytes]] = {} + # Add an authorization header, if configured. + if replication_secret: + headers[b"Authorization"] = [b"Bearer " + replication_secret] + opentracing.inject_header_dict(headers, check_destination=False) + try: + # Keep track of attempts made so we can bail if we don't manage to + # connect to the target after N tries. + attempts = 0 # We keep retrying the same request for timeouts. This is so that we # have a good idea that the request has either succeeded or failed # on the master, and so whether we should clean up or not. while True: - headers: Dict[bytes, List[bytes]] = {} - # Add an authorization header, if configured. - if replication_secret: - headers[b"Authorization"] = [ - b"Bearer " + replication_secret - ] - opentracing.inject_header_dict(headers, check_destination=False) try: result = await request_func(uri, data, headers=headers) break @@ -255,11 +264,27 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): if not cls.RETRY_ON_TIMEOUT: raise - logger.warning("%s request timed out; retrying", cls.NAME) + logger.warning("%s request timed out; retrying", cls.NAME) + + # If we timed out we probably don't need to worry about backing + # off too much, but lets just wait a little anyway. 
+ await clock.sleep(1) + except (ConnectError, DNSLookupError) as e: + if not cls.RETRY_ON_CONNECT_ERROR: + raise + if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS: + raise + + delay = 2 ** attempts + logger.warning( + "%s request connection failed; retrying in %ds: %r", + cls.NAME, + delay, + e, + ) - # If we timed out we probably don't need to worry about backing - # off too much, but lets just wait a little anyway. - await clock.sleep(1) + await clock.sleep(delay) + attempts += 1 except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError # on the main process that we should send to the client. (And -- cgit 1.5.1 From 032688854babeea832cbb4f762fc70fe31e73cc6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 10:29:39 -0500 Subject: Remove some unused variables/parameters. (#12187) --- changelog.d/12187.misc | 1 + synapse/storage/databases/main/roommember.py | 14 +++++--------- 2 files changed, 6 insertions(+), 9 deletions(-) create mode 100644 changelog.d/12187.misc diff --git a/changelog.d/12187.misc b/changelog.d/12187.misc new file mode 100644 index 0000000000..c53e68faa5 --- /dev/null +++ b/changelog.d/12187.misc @@ -0,0 +1 @@ +Remove unused variables. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e48ec5f495..bef675b845 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -46,7 +46,7 @@ from synapse.storage.roommember import ( ProfileInfo, RoomsForUser, ) -from synapse.types import PersistedEventPosition, StateMap, get_domain_from_id +from synapse.types import PersistedEventPosition, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -273,7 +273,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): txn.execute(sql, (room_id,)) res = {} for count, membership in txn: - summary = res.setdefault(membership, MemberSummary([], count)) + res.setdefault(membership, MemberSummary([], count)) # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent @@ -839,18 +839,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): with Measure(self._clock, "get_joined_hosts"): return await self._get_joined_hosts( - room_id, state_group, state_entry.state, state_entry=state_entry + room_id, state_group, state_entry=state_entry ) @cached(num_args=2, max_entries=10000, iterable=True) async def _get_joined_hosts( - self, - room_id: str, - state_group: int, - current_state_ids: StateMap[str], - state_entry: "_StateCacheEntry", + self, room_id: str, state_group: int, state_entry: "_StateCacheEntry" ) -> FrozenSet[str]: - # We don't use `state_group`, its there so that we can cache based on + # We don't use `state_group`, it's there so that we can cache based on # it. However, its important that its never None, since two # current_state's with a state_group of None are likely to be different. # -- cgit 1.5.1 From 690cb4f3b32938f5ced5590abe9429733040a129 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 13:07:41 -0500 Subject: Allow for ignoring some arguments when caching. (#12189) * `@cached` can now take an `uncached_args` which is an iterable of names to not use in the cache key. * Requires `@cached`, @cachedList` and `@lru_cache` to use keyword arguments for clarity. 
* Asserts that keyword-only arguments in cached functions are not accepted. (I tested this briefly and I don't believe this works properly.) --- changelog.d/12189.misc | 1 + synapse/storage/databases/main/events_worker.py | 4 +- synapse/util/caches/descriptors.py | 74 +++++++++++++++++----- tests/util/caches/test_descriptors.py | 84 ++++++++++++++++++++++++- 4 files changed, 142 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc new file mode 100644 index 0000000000..015e808e63 --- /dev/null +++ b/changelog.d/12189.misc @@ -0,0 +1 @@ +Support skipping some arguments when generating cache keys. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 26784f755e..59454a47df 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1286,7 +1286,7 @@ class EventsWorkerStore(SQLBaseStore): ) return {eid for ((_rid, eid), have_event) in res.items() if have_event} - @cachedList("have_seen_event", "keys") + @cachedList(cached_method_name="have_seen_event", list_name="keys") async def _have_seen_events_dict( self, keys: Iterable[Tuple[str, str]] ) -> Dict[Tuple[str, str], bool]: @@ -1954,7 +1954,7 @@ class EventsWorkerStore(SQLBaseStore): get_event_id_for_timestamp_txn, ) - @cachedList("is_partial_state_event", list_name="event_ids") + @cachedList(cached_method_name="is_partial_state_event", list_name="event_ids") async def get_partial_state_events( self, event_ids: Collection[str] ) -> Dict[str, bool]: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1cdead02f1..c3c5c16db9 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -20,6 +20,7 @@ from typing import ( Any, Awaitable, Callable, + Collection, Dict, Generic, Hashable, @@ -69,6 +70,7 @@ class _CacheDescriptorBase: self, orig: Callable[..., Any], num_args: Optional[int], + uncached_args: Optional[Collection[str]] = None, cache_context: bool = False, ): self.orig = orig @@ -76,6 +78,13 @@ class _CacheDescriptorBase: arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args + # There's no reason that keyword-only arguments couldn't be supported, + # but right now they're buggy so do not allow them. + if arg_spec.kwonlyargs: + raise ValueError( + "_CacheDescriptorBase does not support keyword-only arguments." + ) + if "cache_context" in all_args: if not cache_context: raise ValueError( @@ -88,6 +97,9 @@ class _CacheDescriptorBase: " named `cache_context`" ) + if num_args is not None and uncached_args is not None: + raise ValueError("Cannot provide both num_args and uncached_args") + if num_args is None: num_args = len(all_args) - 1 if cache_context: @@ -105,6 +117,12 @@ class _CacheDescriptorBase: # list of the names of the args used as the cache key self.arg_names = all_args[1 : num_args + 1] + # If there are args to not cache on, filter them out (and fix the size of num_args). 
+ if uncached_args is not None: + include_arg_in_cache_key = [n not in uncached_args for n in self.arg_names] + else: + include_arg_in_cache_key = [True] * len(self.arg_names) + # self.arg_defaults is a map of arg name to its default value for each # argument that has a default value if arg_spec.defaults: @@ -119,8 +137,8 @@ class _CacheDescriptorBase: self.add_cache_context = cache_context - self.cache_key_builder = get_cache_key_builder( - self.arg_names, self.arg_defaults + self.cache_key_builder = _get_cache_key_builder( + self.arg_names, include_arg_in_cache_key, self.arg_defaults ) @@ -130,8 +148,7 @@ class _LruCachedFunction(Generic[F]): def lru_cache( - max_entries: int = 1000, - cache_context: bool = False, + *, max_entries: int = 1000, cache_context: bool = False ) -> Callable[[F], _LruCachedFunction[F]]: """A method decorator that applies a memoizing cache around the function. @@ -186,7 +203,9 @@ class LruCacheDescriptor(_CacheDescriptorBase): max_entries: int = 1000, cache_context: bool = False, ): - super().__init__(orig, num_args=None, cache_context=cache_context) + super().__init__( + orig, num_args=None, uncached_args=None, cache_context=cache_context + ) self.max_entries = max_entries def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: @@ -260,6 +279,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): num_args: number of positional arguments (excluding ``self`` and ``cache_context``) to use as cache keys. Defaults to all named args of the function. + uncached_args: a list of argument names to not use as the cache key. + (``self`` and ``cache_context`` are always ignored.) Cannot be used + with num_args. tree: cache_context: iterable: @@ -273,12 +295,18 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): orig: Callable[..., Any], max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, ): - super().__init__(orig, num_args=num_args, cache_context=cache_context) + super().__init__( + orig, + num_args=num_args, + uncached_args=uncached_args, + cache_context=cache_context, + ) if tree and self.num_args < 2: raise RuntimeError( @@ -369,7 +397,7 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): but including list_name) to use as cache keys. Defaults to all named args of the function. """ - super().__init__(orig, num_args=num_args) + super().__init__(orig, num_args=num_args, uncached_args=None) self.list_name = list_name @@ -530,8 +558,10 @@ class _CacheContext: def cached( + *, max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, @@ -541,6 +571,7 @@ def cached( orig, max_entries=max_entries, num_args=num_args, + uncached_args=uncached_args, tree=tree, cache_context=cache_context, iterable=iterable, @@ -551,7 +582,7 @@ def cached( def cachedList( - cached_method_name: str, list_name: str, num_args: Optional[int] = None + *, cached_method_name: str, list_name: str, num_args: Optional[int] = None ) -> Callable[[F], _CachedFunction[F]]: """Creates a descriptor that wraps a function in a `CacheListDescriptor`. 
@@ -590,13 +621,16 @@ def cachedList( return cast(Callable[[F], _CachedFunction[F]], func) -def get_cache_key_builder( - param_names: Sequence[str], param_defaults: Mapping[str, Any] +def _get_cache_key_builder( + param_names: Sequence[str], + include_params: Sequence[bool], + param_defaults: Mapping[str, Any], ) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]: """Construct a function which will build cache keys suitable for a cached function Args: param_names: list of formal parameter names for the cached function + include_params: list of bools of whether to include the parameter name in the cache key param_defaults: a mapping from parameter name to default value for that param Returns: @@ -608,6 +642,7 @@ def get_cache_key_builder( if len(param_names) == 1: nm = param_names[0] + assert include_params[0] is True def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: if nm in kwargs: @@ -620,13 +655,18 @@ def get_cache_key_builder( else: def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: - return tuple(_get_cache_key_gen(param_names, param_defaults, args, kwargs)) + return tuple( + _get_cache_key_gen( + param_names, include_params, param_defaults, args, kwargs + ) + ) return get_cache_key def _get_cache_key_gen( param_names: Iterable[str], + include_params: Iterable[bool], param_defaults: Mapping[str, Any], args: Sequence[Any], kwargs: Mapping[str, Any], @@ -637,16 +677,18 @@ def _get_cache_key_gen( This is essentially the same operation as `inspect.getcallargs`, but optimised so that we don't need to inspect the target function for each call. """ - # We loop through each arg name, looking up if its in the `kwargs`, # otherwise using the next argument in `args`. If there are no more # args then we try looking the arg name up in the defaults. pos = 0 - for nm in param_names: + for nm, inc in zip(param_names, include_params): if nm in kwargs: - yield kwargs[nm] + if inc: + yield kwargs[nm] elif pos < len(args): - yield args[pos] + if inc: + yield args[pos] pos += 1 else: - yield param_defaults[nm] + if inc: + yield param_defaults[nm] diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 19741ffcda..6a4b17527a 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -141,6 +141,84 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() + @defer.inlineCallbacks + def test_cache_uncached_args(self): + """ + Only the arguments not named in uncached_args should matter to the cache + + Note that this is identical to test_cache_num_args, but provides the + arguments differently. + """ + + class Cls: + # Note that it is important that this is not the last argument to + # test behaviour of skipping arguments properly. + @descriptors.cached(uncached_args=("arg2",)) + def fn(self, arg1, arg2, arg3): + return self.mock(arg1, arg2, arg3) + + def __init__(self): + self.mock = mock.Mock() + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, 2, 3) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, 2, 3) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(2, 3, 4) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(2, 3, 4) + obj.mock.reset_mock() + + # the two values should now be cached; we should be able to vary + # the second argument and still get the cached result. 
+ r = yield obj.fn(1, 4, 3) + self.assertEqual(r, "fish") + r = yield obj.fn(2, 5, 4) + self.assertEqual(r, "chips") + obj.mock.assert_not_called() + + @defer.inlineCallbacks + def test_cache_kwargs(self): + """Test that keyword arguments are treated properly""" + + class Cls: + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached() + def fn(self, arg1, kwarg1=2): + return self.mock(arg1, kwarg1=kwarg1) + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, kwarg1=2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(1, kwarg1=3) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(1, kwarg1=3) + obj.mock.reset_mock() + + # the values should now be cached. + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + # We should be able to not provide kwarg1 and get the cached value back. + r = yield obj.fn(1) + self.assertEqual(r, "fish") + # Keyword arguments can be in any order. + r = yield obj.fn(kwarg1=2, arg1=1) + self.assertEqual(r, "fish") + obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): """If the wrapped function throws synchronously, things should continue to work""" @@ -656,7 +734,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): assert current_context().name == "c1" # we want this to behave like an asynchronous function @@ -715,7 +793,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def fn(self, arg1): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") def list_fn(self, args1) -> "Deferred[dict]": return self.mock(args1) @@ -758,7 +836,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function await run_on_reactor() -- cgit 1.5.1 From 15382b1afad65366df13c3b9040b6fdfb1eccfca Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 9 Mar 2022 18:23:57 +0000 Subject: Add third_party module callbacks to check if a user can delete a room and deactivate a user (#12028) * Add check_can_deactivate_user * Add check_can_shutdown_rooms * Documentation * callbacks, not functions * Various suggested tweaks * Add tests for test_check_can_shutdown_room and test_check_can_deactivate_user * Update check_can_deactivate_user to not take a Requester * Fix check_can_shutdown_room docs * Renegade and use `by_admin` instead of `admin_user_id` * fix lint * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier Co-authored-by: Brendan Abolivier --- changelog.d/12028.feature | 1 + docs/modules/third_party_rules_callbacks.md | 43 ++++++++++ synapse/events/third_party_rules.py | 55 +++++++++++++ synapse/handlers/deactivate_account.py | 12 ++- synapse/handlers/room.py | 8 ++ 
 synapse/module_api/__init__.py              |   6 ++
 synapse/rest/admin/rooms.py                 |   9 +++
 tests/rest/client/test_third_party_rules.py | 121 ++++++++++++++++++++++++++++
 8 files changed, 254 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/12028.feature

diff --git a/changelog.d/12028.feature b/changelog.d/12028.feature
new file mode 100644
index 0000000000..5549c8f6fc
--- /dev/null
+++ b/changelog.d/12028.feature
@@ -0,0 +1 @@
+Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`.
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index 09ac838107..1d3c39967f 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c
 
 If multiple modules implement this callback, Synapse runs them all in order.
 
+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see a `M_FORBIDDEN` error.
+
+The module is passed two parameters: `user_id`, the ID of the user being deactivated, and `by_admin`, which is `True` if the request was made by a server admin and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
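+
+For illustration, a module might register and implement both callbacks along
+these lines (a hypothetical module, not one shipped with Synapse):
+
+```python
+class ExampleRulesModule:
+    def __init__(self, config: dict, api):
+        api.register_third_party_rules_callbacks(
+            check_can_shutdown_room=self.check_can_shutdown_room,
+            check_can_deactivate_user=self.check_can_deactivate_user,
+        )
+
+    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
+        # Forbid shutting down one (hypothetical) protected room.
+        return room_id != "!protected:example.org"
+
+    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
+        # Only allow deactivations performed by a server admin.
+        return by_admin
+```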
+
+
 ### `on_profile_update`
 
 _First introduced in Synapse v1.54.0_
 
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index ede72ee876..bfca454f51 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -38,6 +38,8 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[
     [str, StateMap[EventBase], str], Awaitable[bool]
 ]
 ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable]
+CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
+CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
 ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
 ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
 
@@ -157,6 +159,12 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = []
         self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = []
+        self._check_can_shutdown_room_callbacks: List[
+            CHECK_CAN_SHUTDOWN_ROOM_CALLBACK
+        ] = []
+        self._check_can_deactivate_user_callbacks: List[
+            CHECK_CAN_DEACTIVATE_USER_CALLBACK
+        ] = []
         self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = []
         self._on_user_deactivation_status_changed_callbacks: List[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@@ -173,6 +181,8 @@ class ThirdPartyEventRules:
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@@ -198,6 +208,11 @@ class ThirdPartyEventRules:
         if on_new_event is not None:
             self._on_new_event_callbacks.append(on_new_event)
 
+        if check_can_shutdown_room is not None:
+            self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room)
+
+        if check_can_deactivate_user is not None:
+            self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user)
         if on_profile_update is not None:
             self._on_profile_update_callbacks.append(on_profile_update)
 
@@ -369,6 +384,46 @@ class ThirdPartyEventRules:
                     "Failed to run module API callback %s: %s", callback, e
                 )
 
+    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
+        """Intercept requests to shut down a room. If `False` is returned, the
+        room must not be shut down.
+
+        Args:
+            user_id: The ID of the user requesting the shutdown.
+            room_id: The ID of the room.
+        """
+        for callback in self._check_can_shutdown_room_callbacks:
+            try:
+                if await callback(user_id, room_id) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
+    async def check_can_deactivate_user(
+        self,
+        user_id: str,
+        by_admin: bool,
+    ) -> bool:
+        """Intercept requests to deactivate a user. If `False` is returned, the
+        user should not be deactivated.
+
+        Args:
+            user_id: The ID of the user to deactivate.
+            by_admin: Whether the request was made by a server admin.
+        """
+        for callback in self._check_can_deactivate_user_callbacks:
+            try:
+                if await callback(user_id, by_admin) is False:
+                    return False
+            except Exception as e:
+                logger.exception(
+                    "Failed to run module API callback %s: %s", callback, e
+                )
+        return True
+
     async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
         """Given a room ID, return the state events of that room.
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 76ae768e6e..816e1a6d79 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Optional from synapse.api.errors import SynapseError from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import Requester, UserID, create_requester +from synapse.types import Codes, Requester, UserID, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -42,6 +42,7 @@ class DeactivateAccountHandler: # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False + self._third_party_rules = hs.get_third_party_event_rules() # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). @@ -74,6 +75,15 @@ class DeactivateAccountHandler: Returns: True if identity server supports removing threepids, otherwise False. """ + + # Check if this user can be deactivated + if not await self._third_party_rules.check_can_deactivate_user( + user_id, by_admin + ): + raise SynapseError( + 403, "Deactivation of this user is forbidden", Codes.FORBIDDEN + ) + # FIXME: Theoretically there is a race here wherein user resets # password using threepid. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7b965b4b96..b9735631fc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1475,6 +1475,7 @@ class RoomShutdownHandler: self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() + self._third_party_rules = hs.get_third_party_event_rules() self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main @@ -1548,6 +1549,13 @@ class RoomShutdownHandler: if not RoomID.is_valid(room_id): raise SynapseError(400, "%s is not a legal room ID" % (room_id,)) + if not await self._third_party_rules.check_can_shutdown_room( + requester_user_id, room_id + ): + raise SynapseError( + 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN + ) + # Action the block first (even if the room doesn't exist yet) if block: # This will work even if the room is already blocked, but that is diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index c42eeedd87..d735c1d461 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -54,6 +54,8 @@ from synapse.events.spamcheck import ( USER_MAY_SEND_3PID_INVITE_CALLBACK, ) from synapse.events.third_party_rules import ( + CHECK_CAN_DEACTIVATE_USER_CALLBACK, + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, CHECK_EVENT_ALLOWED_CALLBACK, CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, @@ -283,6 +285,8 @@ class ModuleApi: CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, + check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -298,6 +302,8 @@ class ModuleApi: check_threepid_can_be_invited=check_threepid_can_be_invited, check_visibility_can_be_modified=check_visibility_can_be_modified, 
on_new_event=on_new_event,
+            check_can_shutdown_room=check_can_shutdown_room,
+            check_can_deactivate_user=check_can_deactivate_user,
             on_profile_update=on_profile_update,
             on_user_deactivation_status_changed=on_user_deactivation_status_changed,
         )
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f4736a3dad..356d6f74d7 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -67,6 +67,7 @@ class RoomRestV2Servlet(RestServlet):
         self._auth = hs.get_auth()
         self._store = hs.get_datastores().main
         self._pagination_handler = hs.get_pagination_handler()
+        self._third_party_rules = hs.get_third_party_event_rules()
 
     async def on_DELETE(
         self, request: SynapseRequest, room_id: str
@@ -106,6 +107,14 @@ class RoomRestV2Servlet(RestServlet):
                 HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,)
             )
 
+        # Check this here, as otherwise we'll only fail after the background job has been started.
+        if not await self._third_party_rules.check_can_shutdown_room(
+            requester.user.to_string(), room_id
+        ):
+            raise SynapseError(
+                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
+            )
+
         delete_id = self._pagination_handler.start_shutdown_and_purge_room(
             room_id=room_id,
             new_room_user_id=content.get("new_room_user_id"),
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 58f1ea11b7..e7de67e3a3 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -775,3 +775,124 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         self.assertEqual(args[0], user_id)
         self.assertFalse(args[1])
         self.assertTrue(args[2])
+
+    def test_check_can_deactivate_user(self) -> None:
+        """Tests that the check_can_deactivate_user module callback is called
+        correctly when processing a user's deactivation.
+        """
+        # Register a mocked callback.
+        deactivation_mock = Mock(return_value=make_awaitable(False))
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._check_can_deactivate_user_callbacks.append(
+            deactivation_mock,
+        )
+
+        # Register a user that we'll deactivate.
+        user_id = self.register_user("altan", "password")
+        tok = self.login("altan", "password")
+
+        # Deactivate that user.
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/v3/account/deactivate",
+            {
+                "auth": {
+                    "type": LoginType.PASSWORD,
+                    "password": "password",
+                    "identifier": {
+                        "type": "m.id.user",
+                        "user": user_id,
+                    },
+                },
+                "erase": True,
+            },
+            access_token=tok,
+        )
+
+        # Check that the deactivation was blocked
+        self.assertEqual(channel.code, 403, channel.json_body)
+
+        # Check that the mock was called once.
+        deactivation_mock.assert_called_once()
+        args = deactivation_mock.call_args[0]
+
+        # Check that the mock was called with the right user ID
+        self.assertEqual(args[0], user_id)
+
+        # Check that the request was not made by an admin
+        self.assertEqual(args[1], False)
+
+    def test_check_can_deactivate_user_admin(self) -> None:
+        """Tests that the check_can_deactivate_user module callback is called
+        correctly when processing a user's deactivation triggered by a server admin.
+        """
+        # Register a mocked callback.
+        deactivation_mock = Mock(return_value=make_awaitable(False))
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules._check_can_deactivate_user_callbacks.append(
+            deactivation_mock,
+        )
+
+        # Register an admin user.
+ self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Register a user that we'll deactivate. + user_id = self.register_user("altan", "password") + + # Deactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + {"deactivated": True}, + access_token=admin_tok, + ) + + # Check that the deactivation was blocked + self.assertEqual(channel.code, 403, channel.json_body) + + # Check that the mock was called once. + deactivation_mock.assert_called_once() + args = deactivation_mock.call_args[0] + + # Check that the mock was called with the right user ID + self.assertEqual(args[0], user_id) + + # Check that the mock was made by an admin + self.assertEqual(args[1], True) + + def test_check_can_shutdown_room(self) -> None: + """Tests that the check_can_shutdown_room module callback is called + correctly when processing an admin's shutdown room request. + """ + # Register a mocked callback. + shutdown_mock = Mock(return_value=make_awaitable(False)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._check_can_shutdown_room_callbacks.append( + shutdown_mock, + ) + + # Register an admin user. + admin_user_id = self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Shutdown the room. + channel = self.make_request( + "DELETE", + "/_synapse/admin/v2/rooms/%s" % self.room_id, + {}, + access_token=admin_tok, + ) + + # Check that the shutdown was blocked + self.assertEqual(channel.code, 403, channel.json_body) + + # Check that the mock was called once. + shutdown_mock.assert_called_once() + args = shutdown_mock.call_args[0] + + # Check that the mock was called with the right user ID + self.assertEqual(args[0], admin_user_id) + + # Check that the mock was called with the right room ID + self.assertEqual(args[1], self.room_id) -- cgit 1.5.1 From a4c1fdb44a16471964ed6a347be6a191102f5c07 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 9 Mar 2022 18:45:21 +0000 Subject: Remove dead code in `tests/storage/test_database.py` (#12197) Signed-off-by: Sean Quah --- changelog.d/12197.misc | 1 + tests/storage/test_database.py | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/12197.misc diff --git a/changelog.d/12197.misc b/changelog.d/12197.misc new file mode 100644 index 0000000000..7d0e9b6bbf --- /dev/null +++ b/changelog.d/12197.misc @@ -0,0 +1 @@ +Remove some dead code. diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 6fbac0ab14..8597867563 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -13,26 +13,10 @@ # limitations under the License. 
from synapse.storage.database import make_tuple_comparison_clause -from synapse.storage.engines import BaseDatabaseEngine from tests import unittest -def _stub_db_engine(**kwargs) -> BaseDatabaseEngine: - # returns a DatabaseEngine, circumventing the abc mechanism - # any kwargs are set as attributes on the class before instantiating it - t = type( - "TestBaseDatabaseEngine", - (BaseDatabaseEngine,), - dict(BaseDatabaseEngine.__dict__), - ) - # defeat the abc mechanism - t.__abstractmethods__ = set() - for k, v in kwargs.items(): - setattr(t, k, v) - return t(None, None) - - class TupleComparisonClauseTestCase(unittest.TestCase): def test_native_tuple_comparison(self): clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) -- cgit 1.5.1 From 3e4af36bc8515504721b3c1b1d64d4f45359bf88 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 08:01:56 -0500 Subject: Rename get_tcp_replication to get_replication_command_handler. (#12192) Since the object it returns is a ReplicationCommandHandler. This is clean-up from adding support to Redis where the command handler was added as an additional layer of abstraction from the TCP protocol. --- changelog.d/12192.misc | 1 + synapse/app/generic_worker.py | 2 +- synapse/app/homeserver.py | 2 +- synapse/federation/transport/server/_base.py | 2 +- synapse/handlers/presence.py | 4 ++-- synapse/replication/slave/storage/client_ips.py | 2 +- synapse/replication/tcp/client.py | 4 +++- synapse/replication/tcp/handler.py | 4 +--- synapse/replication/tcp/redis.py | 2 +- synapse/replication/tcp/resource.py | 4 ++-- synapse/server.py | 2 +- tests/replication/_base.py | 4 ++-- tests/replication/tcp/streams/test_events.py | 2 +- tests/replication/tcp/streams/test_typing.py | 2 +- tests/replication/test_federation_ack.py | 2 +- 15 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 changelog.d/12192.misc diff --git a/changelog.d/12192.misc b/changelog.d/12192.misc new file mode 100644 index 0000000000..bdfe8dad98 --- /dev/null +++ b/changelog.d/12192.misc @@ -0,0 +1 @@ +Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1536a42723..a10a63b06c 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -417,7 +417,7 @@ class GenericWorkerServer(HomeServer): else: logger.warning("Unsupported listener type: %s", listener.type) - self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) def start(config_options: List[str]) -> None: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a6789a840e..e4dc04c0b4 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -273,7 +273,7 @@ class SynapseHomeServer(HomeServer): # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). 
- self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) for listener in self.config.server.listeners: if listener.type == "http": diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 87e99c7ddf..2529dee613 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -63,7 +63,7 @@ class Authenticator: self.replication_client = None if hs.config.worker.worker_app: - self.replication_client = hs.get_tcp_replication() + self.replication_client = hs.get_replication_command_handler() # A method just so we can pass 'self' as the authenticator to the Servlets async def authenticate_request( diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c155098bee..9927a30e6e 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -424,13 +424,13 @@ class WorkerPresenceHandler(BasePresenceHandler): async def _on_shutdown(self) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_command( + self.hs.get_replication_command_handler().send_command( ClearUserSyncsCommand(self.instance_id) ) def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_user_sync( + self.hs.get_replication_command_handler().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index b5b84c09ae..14706a0817 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -54,6 +54,6 @@ class SlavedClientIpStore(BaseSlavedStore): self.client_ip_last_seen.set(key, now) - self.hs.get_tcp_replication().send_user_ip( + self.hs.get_replication_command_handler().send_user_ip( user_id, access_token, ip, user_agent, device_id, now ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index b8fc1d4db9..deeaaec4e6 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -462,6 +462,8 @@ class FederationSenderHandler: # We ACK this token over replication so that the master can drop # its in memory queues - self._hs.get_tcp_replication().send_federation_ack(current_position) + self._hs.get_replication_command_handler().send_federation_ack( + current_position + ) except Exception: logger.exception("Error updating federation stream position") diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 0d2013a3cf..d51f045f22 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -295,9 +295,7 @@ class ReplicationCommandHandler: raise Exception("Unrecognised command %s in stream queue", cmd.NAME) def start_replication(self, hs: "HomeServer") -> None: - """Helper method to start a replication connection to the remote server - using TCP. 
- """ + """Helper method to start replication.""" if hs.config.redis.redis_enabled: from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index b84e572da1..989c5be032 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -325,7 +325,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): password=hs.config.redis.redis_password, ) - self.synapse_handler = hs.get_tcp_replication() + self.synapse_handler = hs.get_replication_command_handler() self.synapse_stream_name = hs.hostname self.synapse_outbound_redis_connection = outbound_redis_connection diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 494e42a2be..ab829040cd 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -44,7 +44,7 @@ class ReplicationStreamProtocolFactory(ServerFactory): """Factory for new replication connections.""" def __init__(self, hs: "HomeServer"): - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() self.clock = hs.get_clock() self.server_name = hs.config.server.server_name @@ -85,7 +85,7 @@ class ReplicationStreamer: self.is_looping = False self.pending_updates = False - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() # Set of streams to replicate. self.streams = self.command_handler.get_streams_to_replicate() diff --git a/synapse/server.py b/synapse/server.py index 46a64418ea..1270abb5a3 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -639,7 +639,7 @@ class HomeServer(metaclass=abc.ABCMeta): return ReadMarkerHandler(self) @cache_in_self - def get_tcp_replication(self) -> ReplicationCommandHandler: + def get_replication_command_handler(self) -> ReplicationCommandHandler: return ReplicationCommandHandler(self) @cache_in_self diff --git a/tests/replication/_base.py b/tests/replication/_base.py index a7a05a564f..9c5df266bd 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -251,7 +251,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.connect_any_redis_attempts, ) - self.hs.get_tcp_replication().start_replication(self.hs) + self.hs.get_replication_command_handler().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't @@ -375,7 +375,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): ) if worker_hs.config.redis.redis_enabled: - worker_hs.get_tcp_replication().start_replication(worker_hs) + worker_hs.get_replication_command_handler().start_replication(worker_hs) return worker_hs diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index f9d5da723c..641a94133b 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -420,7 +420,7 @@ class EventsStreamTestCase(BaseStreamTestCase): # Manually send an old RDATA command, which should get dropped. This # re-uses the row from above, but with an earlier stream token. 
- self.hs.get_tcp_replication().send_command( + self.hs.get_replication_command_handler().send_command( RdataCommand("events", "master", 1, row) ) diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 3ff5afc6e5..9a229dd23f 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -118,7 +118,7 @@ class TypingStreamTestCase(BaseStreamTestCase): # Reset the typing handler self.hs.get_replication_streams()["typing"].last_token = 0 - self.hs.get_tcp_replication()._streams["typing"].last_token = 0 + self.hs.get_replication_command_handler()._streams["typing"].last_token = 0 typing._latest_room_serial = 0 typing._typing_stream_change_cache = StreamChangeCache( "TypingStreamChangeCache", typing._latest_room_serial diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 1b6a4bf4b0..26b8bd512a 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -48,7 +48,7 @@ class FederationAckTestCase(HomeserverTestCase): transport, rather than assuming that the implementation has a ReplicationCommandHandler. """ - rch = self.hs.get_tcp_replication() + rch = self.hs.get_replication_command_handler() # wire up the ReplicationCommandHandler to a mock connection, which needs # to implement IReplicationConnection. (Note that Mock doesn't understand -- cgit 1.5.1 From 88cd6f937807e64c05458cec86ef0ba0c1c656b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 09:03:59 -0500 Subject: Allow retrieving the relations of a redacted event. (#12130) This is allowed per MSC2675, although the original implementation did not allow for it and would return an empty chunk / not bundle aggregations. The main thing to improve is that the various caches get cleared properly when an event is redacted, and that edits must not leak if the original event is redacted (as that would presumably leak something similar to the original event content). --- changelog.d/12130.bugfix | 1 + changelog.d/12189.bugfix | 1 + changelog.d/12189.misc | 1 - synapse/rest/client/relations.py | 82 +++++++++++++---------------- synapse/storage/databases/main/cache.py | 4 ++ synapse/storage/databases/main/events.py | 11 ++-- synapse/storage/databases/main/relations.py | 60 +++++++++++---------- tests/rest/client/test_relations.py | 45 ++++++++++++++-- 8 files changed, 122 insertions(+), 83 deletions(-) create mode 100644 changelog.d/12130.bugfix create mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix new file mode 100644 index 0000000000..df9b0dc413 --- /dev/null +++ b/changelog.d/12130.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix new file mode 100644 index 0000000000..df9b0dc413 --- /dev/null +++ b/changelog.d/12189.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc deleted file mode 100644 index 015e808e63..0000000000 --- a/changelog.d/12189.misc +++ /dev/null @@ -1 +0,0 @@ -Support skipping some arguments when generating cache keys. 
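
Aside: the behavioural rule this commit implements, per MSC2675, is that relations of a redacted event stay retrievable, except that `m.replace` edits are suppressed, since serving an edit would effectively reveal the redacted content. A condensed sketch of the row filter that the `get_relations_for_event` hunk below applies (everything except `RelationTypes.REPLACE` is illustrative):

```python
from synapse.api.constants import RelationTypes


def visible_relation_rows(rows, parent_is_redacted: bool):
    """Keep (event_id, relation_type) rows for a /relations response,
    dropping edits when the parent event has been redacted."""
    return [
        {"event_id": event_id}
        for event_id, rel_type in rows
        if not (parent_is_redacted and rel_type == RelationTypes.REPLACE)
    ]
```
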
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 07fa1cdd4c..d9a6be43f7 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -27,7 +27,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.storage.relations import AggregationPaginationToken from synapse.types import JsonDict, StreamToken if TYPE_CHECKING: @@ -82,28 +82,25 @@ class RelationPaginationServlet(RestServlet): from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = await StreamToken.from_string(self.store, from_token_str) - to_token = None - if to_token_str: - to_token = await StreamToken.from_string(self.store, to_token_str) - - pagination_chunk = await self.store.get_relations_for_event( - event_id=parent_id, - room_id=room_id, - relation_type=relation_type, - event_type=event_type, - limit=limit, - direction=direction, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) + + pagination_chunk = await self.store.get_relations_for_event( + event_id=parent_id, + event=event, + room_id=room_id, + relation_type=relation_type, + event_type=event_type, + limit=limit, + direction=direction, + from_token=from_token, + to_token=to_token, + ) events = await self.store.get_events_as_list( [c["event_id"] for c in pagination_chunk.chunk] @@ -193,27 +190,23 @@ class RelationAggregationPaginationServlet(RestServlet): from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = AggregationPaginationToken.from_string(from_token_str) - - to_token = None - if to_token_str: - to_token = AggregationPaginationToken.from_string(to_token_str) - - pagination_chunk = await self.store.get_aggregation_groups_for_event( - event_id=parent_id, - room_id=room_id, - event_type=event_type, - limit=limit, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = AggregationPaginationToken.from_string(from_token_str) + + to_token = None + if to_token_str: + to_token = AggregationPaginationToken.from_string(to_token_str) + + pagination_chunk = await self.store.get_aggregation_groups_for_event( + event_id=parent_id, + room_id=room_id, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) return 200, await pagination_chunk.to_dict(self.store) @@ -295,6 +288,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): result = await self.store.get_relations_for_event( event_id=parent_id, + event=event, room_id=room_id, relation_type=relation_type, 
event_type=event_type, diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index abd54c7dc7..d6a2df1afe 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -191,6 +191,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore): if redacts: self._invalidate_get_event_cache(redacts) + # Caches which might leak edits must be invalidated for the event being + # redacted. + self.get_relations_for_event.invalidate((redacts,)) + self.get_applicable_edit.invalidate((redacts,)) if etype == EventTypes.Member: self._membership_stream_cache.entity_has_changed(state_key, stream_ordering) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1dc83aa5e3..1a322882bf 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1619,9 +1619,12 @@ class PersistEventsStore: txn.call_after(prefill) - def _store_redaction(self, txn, event): - # invalidate the cache for the redacted event + def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: + # Invalidate the caches for the redacted event, note that these caches + # are also cleared as part of event replication in _invalidate_caches_for_event. txn.call_after(self.store._invalidate_get_event_cache, event.redacts) + txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,)) + txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,)) self.db_pool.simple_upsert_txn( txn, @@ -1812,9 +1815,7 @@ class PersistEventsStore: txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) if rel_type == RelationTypes.THREAD: - txn.call_after( - self.store.get_thread_summary.invalidate, (parent_id, event.room_id) - ) + txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,)) # It should be safe to only invalidate the cache if the user has not # previously participated in the thread, but that's difficult (and # potentially error-prone) so it is always invalidated. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 36aa1092f6..be1500092b 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -91,10 +91,11 @@ class RelationsWorkerStore(SQLBaseStore): self._msc3440_enabled = hs.config.experimental.msc3440_enabled - @cached(tree=True) + @cached(uncached_args=("event",), tree=True) async def get_relations_for_event( self, event_id: str, + event: EventBase, room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, @@ -108,6 +109,7 @@ class RelationsWorkerStore(SQLBaseStore): Args: event_id: Fetch events that relate to this event ID. + event: The matching EventBase to event_id. room_id: The room the event belongs to. relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. @@ -122,9 +124,13 @@ class RelationsWorkerStore(SQLBaseStore): List of event IDs that match relations requested. The rows are of the form `{"event_id": "..."}`. """ + # We don't use `event_id`, it's there so that we can cache based on + # it. The `event_id` must match the `event.event_id`. 
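
Aside on the `@cached(uncached_args=("event",), tree=True)` decorator used a few lines up: `uncached_args` (added by #12189, whose changelog entry this commit reclassifies as a bugfix) excludes the named arguments from the generated cache key, which is what makes it safe to pass the full `event` alongside `event_id`. The `assert` on the next line of the patch pins down the resulting invariant. A toy illustration, assuming the decorator behaves as described (the method name here is hypothetical):

```python
from synapse.util.caches.descriptors import cached


class ExampleStore:
    @cached(uncached_args=("event",))
    async def get_thing_for_event(self, event_id: str, event) -> dict:
        # The cache key is (event_id,) only: `event` is a pre-fetched copy
        # that callers must keep consistent with event_id.
        assert event.event_id == event_id
        return {}
```
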
+ assert event.event_id == event_id where_clause = ["relates_to_id = ?", "room_id = ?"] - where_args: List[Union[str, int]] = [event_id, room_id] + where_args: List[Union[str, int]] = [event.event_id, room_id] + is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: where_clause.append("relation_type = ?") @@ -157,7 +163,7 @@ class RelationsWorkerStore(SQLBaseStore): order = "ASC" sql = """ - SELECT event_id, topological_ordering, stream_ordering + SELECT event_id, relation_type, topological_ordering, stream_ordering FROM event_relations INNER JOIN events USING (event_id) WHERE %s @@ -178,9 +184,12 @@ class RelationsWorkerStore(SQLBaseStore): last_stream_id = None events = [] for row in txn: - events.append({"event_id": row[0]}) - last_topo_id = row[1] - last_stream_id = row[2] + # Do not include edits for redacted events as they leak event + # content. + if not is_redacted or row[1] != RelationTypes.REPLACE: + events.append({"event_id": row[0]}) + last_topo_id = row[2] + last_stream_id = row[3] # If there are more events, generate the next pagination key. next_token = None @@ -776,7 +785,7 @@ class RelationsWorkerStore(SQLBaseStore): ) references = await self.get_relations_for_event( - event_id, room_id, RelationTypes.REFERENCE, direction="f" + event_id, event, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: aggregations.references = await references.to_dict(cast("DataStore", self)) @@ -797,41 +806,36 @@ class RelationsWorkerStore(SQLBaseStore): A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ - # The already processed event IDs. Tracked separately from the result - # since the result omits events which do not have bundled aggregations. - seen_event_ids = set() - - # State events and redacted events do not get bundled aggregations. - events = [ - event - for event in events - if not event.is_state() and not event.internal_metadata.is_redacted() - ] + # De-duplicate events by ID to handle the same event requested multiple times. + # + # State events do not get bundled aggregations. + events_by_id = { + event.event_id: event for event in events if not event.is_state() + } # event ID -> bundled aggregation in non-serialized form. results: Dict[str, BundledAggregations] = {} # Fetch other relations per event. - for event in events: - # De-duplicate events by ID to handle the same event requested multiple - # times. The caches that _get_bundled_aggregation_for_event use should - # capture this, but best to reduce work. - if event.event_id in seen_event_ids: - continue - seen_event_ids.add(event.event_id) - + for event in events_by_id.values(): event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result: results[event.event_id] = event_result - # Fetch any edits. - edits = await self._get_applicable_edits(seen_event_ids) + # Fetch any edits (but not for redacted events). + edits = await self._get_applicable_edits( + [ + event_id + for event_id, event in events_by_id.items() + if not event.internal_metadata.is_redacted() + ] + ) for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit # Fetch thread summaries. if self._msc3440_enabled: - summaries = await self._get_thread_summaries(seen_event_ids) + summaries = await self._get_thread_summaries(events_by_id.keys()) # Only fetch participated for a limited selection based on what had # summaries. 
participated = await self._get_threads_participated( diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index a40a5de399..f9ae6e663f 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1475,12 +1475,13 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self.assertEqual(relations, {}) def test_redact_parent_annotation(self) -> None: - """Test that annotations of an event are redacted when the original event + """Test that annotations of an event are viewable when the original event is redacted. """ # Add a relation channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) + related_event_id = channel.json_body["event_id"] # The relations should exist. event_ids, relations = self._make_relation_requests() @@ -1494,11 +1495,45 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # Redact the original event. self._redact(self.parent_id) - # The relations are not returned. + # The relations are returned. event_ids, relations = self._make_relation_requests() - self.assertEqual(event_ids, []) - self.assertEqual(relations, {}) + self.assertEquals(event_ids, [related_event_id]) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]}, + ) # There's nothing to aggregate. chunk = self._get_aggregations() - self.assertEqual(chunk, []) + self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}]) + + @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + def test_redact_parent_thread(self) -> None: + """ + Test that thread replies are still available when the root event is redacted. + """ + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 1", "msgtype": "m.text"}, + ) + self.assertEqual(200, channel.code, channel.json_body) + related_event_id = channel.json_body["event_id"] + + # Redact one of the reactions. + self._redact(self.parent_id) + + # The unredacted relation should still exist. + event_ids, relations = self._make_relation_requests() + self.assertEquals(len(event_ids), 1) + self.assertDictContainsSubset( + { + "count": 1, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + self.assertEqual( + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + related_event_id, + ) -- cgit 1.5.1 From 52a947dc4603e1bd14916efb8822c4fe58f0d200 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 10 Mar 2022 15:18:31 +0000 Subject: Updates to the Room DAG concepts development document (#12179) Some stuff that came up while we were talking about #12173. --- changelog.d/12179.doc | 1 + docs/development/room-dag-concepts.md | 71 ++++++++++++++++++++++++++--------- 2 files changed, 54 insertions(+), 18 deletions(-) create mode 100644 changelog.d/12179.doc diff --git a/changelog.d/12179.doc b/changelog.d/12179.doc new file mode 100644 index 0000000000..55d8caa45a --- /dev/null +++ b/changelog.d/12179.doc @@ -0,0 +1 @@ +Updates to the Room DAG concepts development document. diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md index cbc7cf2949..3eb4d5acc4 100644 --- a/docs/development/room-dag-concepts.md +++ b/docs/development/room-dag-concepts.md @@ -30,37 +30,72 @@ rather than skipping any that arrived late; whereas if you're looking at a historical section of timeline (i.e. 
`/messages`), you want to see the best representation of the state of the room as others were seeing it at the time. +## Outliers -## Forward extremity +We mark an event as an `outlier` when we haven't figured out the state for the +room at that point in the DAG yet. They are "floating" events that we haven't +yet correlated to the DAG. -Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet. +Outliers typically arise when we fetch the auth chain or state for a given +event. When that happens, we just grab the events in the state/auth chain, +without calculating the state at those events, or backfilling their +`prev_events`. -The forward extremities of a room are used as the `prev_events` when the next event is sent. +So, typically, we won't have the `prev_events` of an `outlier` in the database, +(though it's entirely possible that we *might* have them for some other +reason). Other things that make outliers different from regular events: + * We don't have state for them, so there should be no entry in + `event_to_state_groups` for an outlier. (In practice this isn't always + the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201). -## Backward extremity + * We don't record entries for them in the `event_edges`, + `event_forward_extremeties` or `event_backward_extremities` tables. -The current marker of where we have backfilled up to and will generally be the -`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when -backfilling history. +Since outliers are not tied into the DAG, they do not normally form part of the +timeline sent down to clients via `/sync` or `/messages`; however there is an +exception: -When we persist a non-outlier event, we clear it as a backward extremity and set -all of its `prev_events` as the new backward extremities if they aren't already -persisted in the `events` table. +### Out-of-band membership events +A special case of outlier events are some membership events for federated rooms +that we aren't full members of. For example: -## Outliers + * invites received over federation, before we join the room + * *rejections* for said invites + * knock events for rooms that we would like to join but have not yet joined. -We mark an event as an `outlier` when we haven't figured out the state for the -room at that point in the DAG yet. +In all the above cases, we don't have the state for the room, which is why they +are treated as outliers. They are a bit special though, in that they are +proactively sent to clients via `/sync`. -We won't *necessarily* have the `prev_events` of an `outlier` in the database, -but it's entirely possible that we *might*. +## Forward extremity + +Most-recent-in-time events in the DAG which are not referenced by any other +events' `prev_events` yet. (In this definition, outliers, rejected events, and +soft-failed events don't count.) + +The forward extremities of a room (or at least, a subset of them, if there are +more than ten) are used as the `prev_events` when the next event is sent. + +The "current state" of a room (ie: the state which would be used if we +generated a new event) is, therefore, the resolution of the room states +at each of the forward extremities. + +## Backward extremity + +The current marker of where we have backfilled up to and will generally be the +`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when +backfilling history. 
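
Aside: the two definitions above reduce to simple set arithmetic over `prev_events`. A toy model, not Synapse code (which tracks these in dedicated extremities tables):

```python
def extremities(events: dict) -> tuple:
    """`events` maps each locally-persisted event_id to its prev_event_ids.

    Forward extremities: known events that no other known event references.
    Backward extremities: referenced prev_events we don't have locally,
    i.e. the points backfill could start from.
    """
    referenced = {p for prevs in events.values() for p in prevs}
    return set(events) - referenced, referenced - set(events)


# B and C reference A, which we never received; nothing references C or D.
assert extremities({"B": ["A"], "C": ["B"], "D": ["B"]}) == ({"C", "D"}, {"A"})
```
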
-For example, when we fetch the event auth chain or state for a given event, we -mark all of those claimed auth events as outliers because we haven't done the -state calculation ourself. +Note that, unlike forward extremities, we typically don't have any backward +extremity events themselves in the database - or, if we do, they will be "outliers" (see +above). Either way, we don't expect to have the room state at a backward extremity. +When we persist a non-outlier event, if it was previously a backward extremity, +we clear it as a backward extremity and set all of its `prev_events` as the new +backward extremities if they aren't already persisted as non-outliers. This +therefore keeps the backward extremities up-to-date. ## State groups -- cgit 1.5.1 From ea27528b5d177dcfc5a4e38b463baeace916dc8e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 10:36:13 -0500 Subject: Support stable identifiers for MSC3440: Threading (#12151) The unstable identifiers are still supported if the experimental configuration flag is enabled. The unstable identifiers will be removed in a future release. --- changelog.d/12151.feature | 1 + synapse/api/constants.py | 4 +- synapse/api/filtering.py | 23 ++++----- synapse/events/utils.py | 9 +++- synapse/handlers/message.py | 5 +- synapse/rest/client/versions.py | 1 + synapse/server.py | 2 +- synapse/storage/databases/main/events.py | 5 +- synapse/storage/databases/main/relations.py | 77 ++++++++++++++++++----------- synapse/storage/databases/main/stream.py | 18 ++++--- tests/rest/client/test_relations.py | 7 +-- tests/rest/client/test_rooms.py | 18 +++---- tests/storage/test_stream.py | 20 ++++---- 13 files changed, 109 insertions(+), 81 deletions(-) create mode 100644 changelog.d/12151.feature diff --git a/changelog.d/12151.feature b/changelog.d/12151.feature new file mode 100644 index 0000000000..18432b2da9 --- /dev/null +++ b/changelog.d/12151.feature @@ -0,0 +1 @@ +Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 36ace7c613..b0c08a074d 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -178,7 +178,9 @@ class RelationTypes: ANNOTATION: Final = "m.annotation" REPLACE: Final = "m.replace" REFERENCE: Final = "m.reference" - THREAD: Final = "io.element.thread" + THREAD: Final = "m.thread" + # TODO Remove this in Synapse >= v1.57.0. + UNSTABLE_THREAD: Final = "io.element.thread" class LimitBlockingTypes: diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index cb532d7238..27e97d6f37 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -88,7 +88,9 @@ ROOM_EVENT_FILTER_SCHEMA = { "org.matrix.labels": {"type": "array", "items": {"type": "string"}}, "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}}, # MSC3440, filtering by event relations. 
+ "related_by_senders": {"type": "array", "items": {"type": "string"}}, "io.element.relation_senders": {"type": "array", "items": {"type": "string"}}, + "related_by_rel_types": {"type": "array", "items": {"type": "string"}}, "io.element.relation_types": {"type": "array", "items": {"type": "string"}}, }, } @@ -318,19 +320,18 @@ class Filter: self.labels = filter_json.get("org.matrix.labels", None) self.not_labels = filter_json.get("org.matrix.not_labels", []) - # Ideally these would be rejected at the endpoint if they were provided - # and not supported, but that would involve modifying the JSON schema - # based on the homeserver configuration. + self.related_by_senders = self.filter_json.get("related_by_senders", None) + self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None) + + # Fallback to the unstable prefix if the stable version is not given. if hs.config.experimental.msc3440_enabled: - self.relation_senders = self.filter_json.get( + self.related_by_senders = self.related_by_senders or self.filter_json.get( "io.element.relation_senders", None ) - self.relation_types = self.filter_json.get( - "io.element.relation_types", None + self.related_by_rel_types = ( + self.related_by_rel_types + or self.filter_json.get("io.element.relation_types", None) ) - else: - self.relation_senders = None - self.relation_types = None def filters_all_types(self) -> bool: return "*" in self.not_types @@ -461,7 +462,7 @@ class Filter: event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined] event_ids_to_keep = set( await self._store.events_have_relations( - event_ids, self.relation_senders, self.relation_types + event_ids, self.related_by_senders, self.related_by_rel_types ) ) @@ -474,7 +475,7 @@ class Filter: async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: result = [event for event in events if self._check(event)] - if self.relation_senders or self.relation_types: + if self.related_by_senders or self.related_by_rel_types: return await self._check_event_relations(result) return result diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ee34cb46e4..b2a237c1e0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -38,6 +38,7 @@ from synapse.util.frozenutils import unfreeze from . import EventBase if TYPE_CHECKING: + from synapse.server import HomeServer from synapse.storage.databases.main.relations import BundledAggregations @@ -395,6 +396,9 @@ class EventClientSerializer: clients. """ + def __init__(self, hs: "HomeServer"): + self._msc3440_enabled = hs.config.experimental.msc3440_enabled + def serialize_event( self, event: Union[JsonDict, EventBase], @@ -515,11 +519,14 @@ class EventClientSerializer: thread.latest_event, serialized_latest_event, thread.latest_edit ) - serialized_aggregations[RelationTypes.THREAD] = { + thread_summary = { "latest_event": serialized_latest_event, "count": thread.count, "current_user_participated": thread.current_user_participated, } + serialized_aggregations[RelationTypes.THREAD] = thread_summary + if self._msc3440_enabled: + serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary # Include the bundled aggregations in the event. 
if serialized_aggregations: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0799ec9a84..f9544fe7fb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1079,7 +1079,10 @@ class EventCreationHandler: raise SynapseError(400, "Can't send same reaction twice") # Don't attempt to start a thread if the parent event is a relation. - elif relation_type == RelationTypes.THREAD: + elif ( + relation_type == RelationTypes.THREAD + or relation_type == RelationTypes.UNSTABLE_THREAD + ): if await self.store.event_includes_relation(relates_to): raise SynapseError( 400, "Cannot start threads from an event with a relation" diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 2e5d0e4e22..9a65aa4843 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -101,6 +101,7 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3030": self.config.experimental.msc3030_enabled, # Adds support for thread relations, per MSC3440. "org.matrix.msc3440": self.config.experimental.msc3440_enabled, + "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above }, }, ) diff --git a/synapse/server.py b/synapse/server.py index 1270abb5a3..7741ff29dc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -754,7 +754,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer(self) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1a322882bf..1f60aef180 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1814,7 +1814,10 @@ class PersistEventsStore: if rel_type == RelationTypes.REPLACE: txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) - if rel_type == RelationTypes.THREAD: + if ( + rel_type == RelationTypes.THREAD + or rel_type == RelationTypes.UNSTABLE_THREAD + ): txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,)) # It should be safe to only invalidate the cache if the user has not # previously participated in the thread, but that's difficult (and diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index be1500092b..c4869d64e6 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -508,7 +508,7 @@ class RelationsWorkerStore(SQLBaseStore): AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC """ else: @@ -523,16 +523,22 @@ class RelationsWorkerStore(SQLBaseStore): AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s ORDER BY child.topological_ordering DESC, child.stream_ordering DESC """ clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", event_ids ) - args.append(RelationTypes.THREAD) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" 
+ args.append(RelationTypes.THREAD) + + txn.execute(sql % (clause, relations_clause), args) latest_event_ids = {} for parent_event_id, child_event_id in txn: # Only consider the latest threaded reply (by topological ordering). @@ -552,7 +558,7 @@ class RelationsWorkerStore(SQLBaseStore): AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s GROUP BY parent.event_id """ @@ -561,9 +567,15 @@ class RelationsWorkerStore(SQLBaseStore): clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", latest_event_ids.keys() ) - args.append(RelationTypes.THREAD) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" + args.append(RelationTypes.THREAD) + + txn.execute(sql % (clause, relations_clause), args) counts = dict(cast(List[Tuple[str, int]], txn.fetchall())) return counts, latest_event_ids @@ -626,16 +638,24 @@ class RelationsWorkerStore(SQLBaseStore): AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s AND child.sender = ? """ clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", event_ids ) - args.extend((RelationTypes.THREAD, user_id)) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" + args.append(RelationTypes.THREAD) + + args.append(user_id) + + txn.execute(sql % (clause, relations_clause), args) return {row[0] for row in txn.fetchall()} participated_threads = await self.db_pool.runInteraction( @@ -834,26 +854,23 @@ class RelationsWorkerStore(SQLBaseStore): results.setdefault(event_id, BundledAggregations()).replace = edit # Fetch thread summaries. - if self._msc3440_enabled: - summaries = await self._get_thread_summaries(events_by_id.keys()) - # Only fetch participated for a limited selection based on what had - # summaries. - participated = await self._get_threads_participated( - summaries.keys(), user_id - ) - for event_id, summary in summaries.items(): - if summary: - thread_count, latest_thread_event, edit = summary - results.setdefault( - event_id, BundledAggregations() - ).thread = _ThreadAggregation( - latest_event=latest_thread_event, - latest_edit=edit, - count=thread_count, - # If there's a thread summary it must also exist in the - # participated dictionary. - current_user_participated=participated[event_id], - ) + summaries = await self._get_thread_summaries(events_by_id.keys()) + # Only fetch participated for a limited selection based on what had + # summaries. + participated = await self._get_threads_participated(summaries.keys(), user_id) + for event_id, summary in summaries.items(): + if summary: + thread_count, latest_thread_event, edit = summary + results.setdefault( + event_id, BundledAggregations() + ).thread = _ThreadAggregation( + latest_event=latest_thread_event, + latest_edit=edit, + count=thread_count, + # If there's a thread summary it must also exist in the + # participated dictionary. 
+ current_user_participated=participated[event_id], + ) return results diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index a898f847e7..39e1efe373 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -325,21 +325,23 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: args.extend(event_filter.labels) # Filter on relation_senders / relation types from the joined tables. - if event_filter.relation_senders: + if event_filter.related_by_senders: clauses.append( "(%s)" % " OR ".join( - "related_event.sender = ?" for _ in event_filter.relation_senders + "related_event.sender = ?" for _ in event_filter.related_by_senders ) ) - args.extend(event_filter.relation_senders) + args.extend(event_filter.related_by_senders) - if event_filter.relation_types: + if event_filter.related_by_rel_types: clauses.append( "(%s)" - % " OR ".join("relation_type = ?" for _ in event_filter.relation_types) + % " OR ".join( + "relation_type = ?" for _ in event_filter.related_by_rel_types + ) ) - args.extend(event_filter.relation_types) + args.extend(event_filter.related_by_rel_types) return " AND ".join(clauses), args @@ -1203,7 +1205,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # If there is a filter on relation_senders and relation_types join to the # relations table. if event_filter and ( - event_filter.relation_senders or event_filter.relation_types + event_filter.related_by_senders or event_filter.related_by_rel_types ): # Filtering by relations could cause the same event to appear multiple # times (since there's no limit on the number of relations to an event). @@ -1211,7 +1213,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): join_clause += """ LEFT JOIN event_relations AS relation ON (event.event_id = relation.relates_to_id) """ - if event_filter.relation_senders: + if event_filter.related_by_senders: join_clause += """ LEFT JOIN events AS related_event ON (relation.event_id = related_event.event_id) """ diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index f9ae6e663f..0cbe6c0cf7 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -547,9 +547,7 @@ class RelationsTestCase(BaseRelationsTestCase): ) self.assertEqual(400, channel.code, channel.json_body) - @unittest.override_config( - {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}} - ) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) def test_bundled_aggregations(self) -> None: """ Test that annotations, references, and threads get correctly bundled. @@ -758,7 +756,6 @@ class RelationsTestCase(BaseRelationsTestCase): }, ) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_ignore_invalid_room(self) -> None: """Test that we ignore invalid relations over federation.""" # Create another room and send a message in it. 
@@ -1065,7 +1062,6 @@ class RelationsTestCase(BaseRelationsTestCase): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_edit_thread(self) -> None: """Test that editing a thread works.""" @@ -1383,7 +1379,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase): chunk = self._get_aggregations() self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}]) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_redact_relation_thread(self) -> None: """ Test that thread replies are properly handled after the thread reply redacted. diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 37866ee330..3a9617d6da 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2141,21 +2141,19 @@ class RelationsTestCase(unittest.HomeserverTestCase): def test_filter_relation_senders(self) -> None: # Messages which second user reacted to. - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_1) # Messages which third user reacted to. - filter = {"io.element.relation_senders": [self.third_user_id]} + filter = {"related_by_senders": [self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_2) # Messages which either user reacted to. - filter = { - "io.element.relation_senders": [self.second_user_id, self.third_user_id] - } + filter = {"related_by_senders": [self.second_user_id, self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 2, chunk) self.assertCountEqual( @@ -2164,20 +2162,20 @@ class RelationsTestCase(unittest.HomeserverTestCase): def test_filter_relation_type(self) -> None: # Messages which have annotations. - filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} + filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_1) # Messages which have references. - filter = {"io.element.relation_types": [RelationTypes.REFERENCE]} + filter = {"related_by_rel_types": [RelationTypes.REFERENCE]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_2) # Messages which have either annotations or references. filter = { - "io.element.relation_types": [ + "related_by_rel_types": [ RelationTypes.ANNOTATION, RelationTypes.REFERENCE, ] @@ -2191,8 +2189,8 @@ class RelationsTestCase(unittest.HomeserverTestCase): def test_filter_relation_senders_and_type(self) -> None: # Messages which second user reacted to. 
filter = { - "io.element.relation_senders": [self.second_user_id], - "io.element.relation_types": [RelationTypes.ANNOTATION], + "related_by_senders": [self.second_user_id], + "related_by_rel_types": [RelationTypes.ANNOTATION], } chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 6a1cf33054..eaa0d7d749 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -129,21 +129,19 @@ class PaginationTestCase(HomeserverTestCase): def test_filter_relation_senders(self): # Messages which second user reacted to. - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) # Messages which third user reacted to. - filter = {"io.element.relation_senders": [self.third_user_id]} + filter = {"related_by_senders": [self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_2) # Messages which either user reacted to. - filter = { - "io.element.relation_senders": [self.second_user_id, self.third_user_id] - } + filter = {"related_by_senders": [self.second_user_id, self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 2, chunk) self.assertCountEqual( @@ -152,20 +150,20 @@ class PaginationTestCase(HomeserverTestCase): def test_filter_relation_type(self): # Messages which have annotations. - filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} + filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) # Messages which have references. - filter = {"io.element.relation_types": [RelationTypes.REFERENCE]} + filter = {"related_by_rel_types": [RelationTypes.REFERENCE]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_2) # Messages which have either annotations or references. filter = { - "io.element.relation_types": [ + "related_by_rel_types": [ RelationTypes.ANNOTATION, RelationTypes.REFERENCE, ] @@ -179,8 +177,8 @@ class PaginationTestCase(HomeserverTestCase): def test_filter_relation_senders_and_type(self): # Messages which second user reacted to. filter = { - "io.element.relation_senders": [self.second_user_id], - "io.element.relation_types": [RelationTypes.ANNOTATION], + "related_by_senders": [self.second_user_id], + "related_by_rel_types": [RelationTypes.ANNOTATION], } chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) @@ -201,7 +199,7 @@ class PaginationTestCase(HomeserverTestCase): tok=self.second_tok, ) - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) -- cgit 1.5.1 From 72e7f1c420b879a0a1ef1430771698b868693ab0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 10 Mar 2022 15:53:23 +0000 Subject: Remove workaround introduced in Synapse v1.50.0rc1 for Mjolnir compatibility. Breaks compatibility with Mjolnir v1.3.1 and earlier. 
(#11700) --- changelog.d/11700.removal | 1 + docs/upgrade.md | 8 ++++++++ synapse/util/__init__.py | 7 ------- 3 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11700.removal diff --git a/changelog.d/11700.removal b/changelog.d/11700.removal new file mode 100644 index 0000000000..d3d3c48f0f --- /dev/null +++ b/changelog.d/11700.removal @@ -0,0 +1 @@ +Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. diff --git a/docs/upgrade.md b/docs/upgrade.md index 0d0bb066ee..95005962dc 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -106,6 +106,14 @@ You will need to ensure `synctl` is on your `PATH`. automatically, though you might need to activate a virtual environment depending on how you installed Synapse. + +## Compatibility dropped for Mjolnir 1.3.1 and earlier + +Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier. +If you use the Mjolnir module to moderate your homeserver, +please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse. + + # Upgrading to v1.54.0 ## Legacy structured logging configuration removal diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 58b4220ff3..d8046b7553 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -31,13 +31,6 @@ from synapse.logging import context if typing.TYPE_CHECKING: pass -# FIXME Mjolnir imports glob_to_regex from this file, but it was moved to -# matrix_common. -# As a temporary workaround, we import glob_to_regex here for -# compatibility with current versions of Mjolnir. -# See https://github.com/matrix-org/mjolnir/pull/174 -from matrix_common.regex import glob_to_regex # noqa - logger = logging.getLogger(__name__) -- cgit 1.5.1 From ed9aea42fa991428406be96a67c311a8f9cec544 Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 10 Mar 2022 09:40:07 -0800 Subject: fix misleading comment in `check_events_for_spam` (#12203) --- changelog.d/12203.misc | 1 + synapse/events/spamcheck.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12203.misc diff --git a/changelog.d/12203.misc b/changelog.d/12203.misc new file mode 100644 index 0000000000..892dc5bfb7 --- /dev/null +++ b/changelog.d/12203.misc @@ -0,0 +1 @@ +Fix a misleading comment in the function `check_event_for_spam`. diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 04afd48274..60904a55f5 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -245,8 +245,8 @@ class SpamChecker: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if - sent by a local user. If it is sent by a user on another server, then - users receive a blank event. + sent by a local user. If it is sent by a user on another server, the + event is soft-failed. Args: event: the event to be checked -- cgit 1.5.1 From 7577894bec78a063f3e85ec7d386a58a0c60fb11 Mon Sep 17 00:00:00 2001 From: ~creme Date: Thu, 10 Mar 2022 19:15:19 +0100 Subject: Document that most streams can only have a single writer. (#12196) This includes the `typing`, `to_device`, `account_data`, `receipts`, and `presence` streams (really anything except the `events` stream). 
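
Aside: Synapse enforces the single-writer restriction at startup when parsing `stream_writers`; conceptually the check runs per stream, along the lines of the sketch below (a paraphrase, not the verbatim config-parsing code):

```python
from typing import Dict, List


def validate_stream_writers(stream_writers: Dict[str, List[str]]) -> None:
    for stream, writers in stream_writers.items():
        # Only the `events` stream supports sharding across several writers.
        if stream != "events" and len(writers) != 1:
            raise ValueError(
                f"Must only specify one instance to handle `{stream}` messages."
            )
```
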
--- changelog.d/12196.doc | 1 + docs/workers.md | 31 +++++++++++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) create mode 100644 changelog.d/12196.doc diff --git a/changelog.d/12196.doc b/changelog.d/12196.doc new file mode 100644 index 0000000000..269f06aa33 --- /dev/null +++ b/changelog.d/12196.doc @@ -0,0 +1 @@ +Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. \ No newline at end of file diff --git a/docs/workers.md b/docs/workers.md index b0f8599ef0..8751134e65 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -351,8 +351,11 @@ is only supported with Redis-based replication.) To enable this, the worker must have a HTTP replication listener configured, have a `worker_name` and be listed in the `instance_map` config. The same worker -can handle multiple streams. For example, to move event persistence off to a -dedicated worker, the shared configuration would include: +can handle multiple streams, but unless otherwise documented, each stream can only +have a single writer. + +For example, to move event persistence off to a dedicated worker, the shared +configuration would include: ```yaml instance_map: @@ -370,8 +373,8 @@ streams and the endpoints associated with them: ##### The `events` stream -The `events` stream also experimentally supports having multiple writers, where -work is sharded between them by room ID. Note that you *must* restart all worker +The `events` stream experimentally supports having multiple writers, where work +is sharded between them by room ID. Note that you *must* restart all worker instances when adding or removing event persisters. An example `stream_writers` configuration with multiple writers: @@ -384,38 +387,38 @@ stream_writers: ##### The `typing` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `typing` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `typing` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing ##### The `to_device` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `to_device` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `to_device` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ ##### The `account_data` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `account_data` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `account_data` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data ##### The `receipts` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `receipts` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `receipts` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers ##### The `presence` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `presence` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `presence` stream: 
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ -- cgit 1.5.1 From 483f2aa2eca98500046847364ede04b034530aac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 11 Mar 2022 10:33:49 +0000 Subject: Retention test: avoid relying on state at purged events (#12202) This test was relying on poking events which weren't in the database into filter_events_for_client. --- changelog.d/12202.misc | 1 + tests/rest/client/test_retention.py | 29 +++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12202.misc diff --git a/changelog.d/12202.misc b/changelog.d/12202.misc new file mode 100644 index 0000000000..9f333e718a --- /dev/null +++ b/changelog.d/12202.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index f3bf8d0934..7b8fe6d025 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -24,6 +24,7 @@ from synapse.util import Clock from synapse.visibility import filter_events_for_client from tests import unittest +from tests.unittest import override_config one_hour_ms = 3600000 one_day_ms = one_hour_ms * 24 @@ -38,7 +39,10 @@ class RetentionTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() - config["retention"] = { + + # merge this default retention config with anything that was specified in + # @override_config + retention_config = { "enabled": True, "default_policy": { "min_lifetime": one_day_ms, @@ -47,6 +51,8 @@ class RetentionTestCase(unittest.HomeserverTestCase): "allowed_lifetime_min": one_day_ms, "allowed_lifetime_max": one_day_ms * 3, } + retention_config.update(config.get("retention", {})) + config["retention"] = retention_config self.hs = self.setup_test_homeserver(config=config) @@ -115,22 +121,20 @@ class RetentionTestCase(unittest.HomeserverTestCase): self._test_retention_event_purged(room_id, one_day_ms * 2) + @override_config({"retention": {"purge_jobs": [{"interval": "5d"}]}}) def test_visibility(self) -> None: """Tests that synapse.visibility.filter_events_for_client correctly filters out - outdated events + outdated events, even if the purge job hasn't got to them yet. + + We do this by setting a very long time between purge jobs. """ store = self.hs.get_datastores().main storage = self.hs.get_storage() room_id = self.helper.create_room_as(self.user_id, tok=self.token) - events = [] # Send a first event, which should be filtered out at the end of the test. resp = self.helper.send(room_id=room_id, body="1", tok=self.token) - - # Get the event from the store so that we end up with a FrozenEvent that we can - # give to filter_events_for_client. We need to do this now because the event won't - # be in the database anymore after it has expired. - events.append(self.get_success(store.get_event(resp.get("event_id")))) + first_event_id = resp.get("event_id") # Advance the time by 2 days. We're using the default retention policy, therefore # after this the first event will still be valid. @@ -138,16 +142,17 @@ class RetentionTestCase(unittest.HomeserverTestCase): # Send another event, which shouldn't get filtered out. resp = self.helper.send(room_id=room_id, body="2", tok=self.token) - valid_event_id = resp.get("event_id") - events.append(self.get_success(store.get_event(valid_event_id))) - # Advance the time by another 2 days. 
After this, the first event should be # outdated but not the second one. self.reactor.advance(one_day_ms * 2 / 1000) - # Run filter_events_for_client with our list of FrozenEvents. + # Fetch the events, and run filter_events_for_client on them + events = self.get_success( + store.get_events_as_list([first_event_id, valid_event_id]) + ) + self.assertEqual(2, len(events), "events retrieved from database") filtered_events = self.get_success( filter_events_for_client(storage, self.user_id, events) ) -- cgit 1.5.1 From 3b12f6d61b3b10b57e7b3a45f1d7a96f9790d674 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 11 Mar 2022 11:10:20 +0000 Subject: Note that contributors can sign off privately (#12204) Co-authored-by: Patrick Cloke --- changelog.d/12204.doc | 1 + docs/development/contributing_guide.md | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/12204.doc diff --git a/changelog.d/12204.doc b/changelog.d/12204.doc new file mode 100644 index 0000000000..c4b2805bb1 --- /dev/null +++ b/changelog.d/12204.doc @@ -0,0 +1 @@ +Document that contributors can sign off privately by email. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 8448685952..071202e196 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs. +### Private Sign off + +If you would like to provide your legal name privately to the Matrix.org +Foundation (instead of in a public commit or comment), you can do so +by emailing your legal name and a link to the pull request to +[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off). +It helps to include "sign off" or similar in the subject line. You will then +be instructed further. + +Once private sign off is complete, doing so for future contributions will not +be required. # 10. Turn feedback into better code. -- cgit 1.5.1 From bc9dff1d9597251a15a15475cb8e8194b2d14910 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 11 Mar 2022 07:06:21 -0500 Subject: Remove unnecessary pass statements. (#12206) --- changelog.d/12206.misc | 1 + synapse/handlers/device.py | 2 -- synapse/handlers/presence.py | 2 -- synapse/http/matrixfederationclient.py | 2 -- synapse/http/server.py | 1 - synapse/rest/media/v1/_base.py | 1 - synapse/server.py | 1 - synapse/storage/databases/main/registration.py | 2 -- synapse/storage/schema/main/delta/30/as_users.py | 1 - synapse/util/caches/treecache.py | 2 -- tests/handlers/test_password_providers.py | 1 - 11 files changed, 1 insertion(+), 15 deletions(-) create mode 100644 changelog.d/12206.misc diff --git a/changelog.d/12206.misc b/changelog.d/12206.misc new file mode 100644 index 0000000000..df59bb56cd --- /dev/null +++ b/changelog.d/12206.misc @@ -0,0 +1 @@ +Remove unnecessary `pass` statements. 
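For context on this changelog entry: in Python, a docstring is by itself a complete function or class body, and a `pass` that follows other statements in a block is dead code, so every statement removed in the diffs below is a no-op. A minimal illustrative sketch (the names here are made up, not from the Synapse codebase):

```python
class Stub:
    """A docstring alone is a valid class body; no `pass` is needed."""


def on_finished() -> None:
    """The same holds for functions: the docstring satisfies the body rule."""


try:
    count = int("7")
except ValueError:
    count = 0  # the handler already has a body, so a trailing `pass` would be dead code
print(count)
```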
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d90cb259a6..d5ccaa0c37 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -371,7 +371,6 @@ class DeviceHandler(DeviceWorkerHandler): log_kv( {"reason": "User doesn't have device id.", "device_id": device_id} ) - pass else: raise @@ -414,7 +413,6 @@ class DeviceHandler(DeviceWorkerHandler): # no match set_tag("error", True) set_tag("reason", "User doesn't have that device id.") - pass else: raise diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 9927a30e6e..34d9411bbf 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -267,7 +267,6 @@ class BasePresenceHandler(abc.ABC): is_syncing: Whether or not the user is now syncing sync_time_msec: Time in ms when the user was last syncing """ - pass async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process @@ -277,7 +276,6 @@ class BasePresenceHandler(abc.ABC): This is a no-op when presence is handled by a different worker. """ - pass async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: list diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 40bf1e06d6..6b98d865f5 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -120,7 +120,6 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC): """Called when response has finished streaming and the parser should return the final result (or error). """ - pass @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -601,7 +600,6 @@ class MatrixFederationHttpClient: response.code, response_phrase, ) - pass else: logger.info( "{%s} [%s] Got response headers: %d %s", diff --git a/synapse/http/server.py b/synapse/http/server.py index 09b4125489..31ca841889 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -233,7 +233,6 @@ class HttpServer(Protocol): servlet_classname (str): The name of the handler to be used in prometheus and opentracing logs. """ - pass class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 9b40fd8a6c..c35d42fab8 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -298,7 +298,6 @@ class Responder: Returns: Resolves once the response has finished being written """ - pass def __enter__(self) -> None: pass diff --git a/synapse/server.py b/synapse/server.py index 7741ff29dc..2fcf18a7a6 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -328,7 +328,6 @@ class HomeServer(metaclass=abc.ABCMeta): Does nothing in this base class; overridden in derived classes to start the appropriate listeners. 
""" - pass def setup_background_tasks(self) -> None: """ diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index dc6665237a..a698d10cc5 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -48,8 +48,6 @@ class ExternalIDReuseException(Exception): """Exception if writing an external id for a user fails, because this external id is given to an other user.""" - pass - @attr.s(frozen=True, slots=True, auto_attribs=True) class TokenLookupResult: diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py index 22a7901e15..4b4b166e37 100644 --- a/synapse/storage/schema/main/delta/30/as_users.py +++ b/synapse/storage/schema/main/delta/30/as_users.py @@ -36,7 +36,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): config_files = config.appservice.app_service_config_files except AttributeError: logger.warning("Could not get app_service_config_files from config") - pass appservices = load_appservices(config.server.server_name, config_files) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 563845f867..e78305f787 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -22,8 +22,6 @@ class TreeCacheNode(dict): leaves. """ - pass - class TreeCache: """ diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 49d832de81..d401fda938 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -124,7 +124,6 @@ class PasswordCustomAuthProvider: ("m.login.password", ("password",)): self.check_auth, } ) - pass def check_auth(self, *args): return mock_password_provider.check_auth(*args) -- cgit 1.5.1 From e10a2fe0c28ec9206c0e2275df492f61ff5025f2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 11 Mar 2022 07:07:15 -0500 Subject: Add some type hints to the tests.handlers module. (#12207) --- changelog.d/12108.misc | 2 +- changelog.d/12146.misc | 2 +- changelog.d/12207.misc | 1 + tests/handlers/test_admin.py | 18 +++++--- tests/handlers/test_auth.py | 34 ++++++++------ tests/handlers/test_deactivate_account.py | 2 +- tests/handlers/test_device.py | 76 ++++++++++++++++--------------- 7 files changed, 74 insertions(+), 61 deletions(-) create mode 100644 changelog.d/12207.misc diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc index 0360dbd61e..b67a701dbb 100644 --- a/changelog.d/12108.misc +++ b/changelog.d/12108.misc @@ -1 +1 @@ -Add type hints to `tests/rest/client`. +Add type hints to tests files. diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc index 3ca7c47212..b67a701dbb 100644 --- a/changelog.d/12146.misc +++ b/changelog.d/12146.misc @@ -1 +1 @@ -Add type hints to `tests/rest`. +Add type hints to tests files. diff --git a/changelog.d/12207.misc b/changelog.d/12207.misc new file mode 100644 index 0000000000..b67a701dbb --- /dev/null +++ b/changelog.d/12207.misc @@ -0,0 +1 @@ +Add type hints to tests files. 
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index abf2a0fe0d..c1579dac61 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -15,11 +15,15 @@ from collections import Counter from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin import synapse.storage from synapse.api.constants import EventTypes, JoinRules from synapse.api.room_versions import RoomVersions from synapse.rest.client import knock, login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -32,7 +36,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): knock.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_handler = hs.get_admin_handler() self.user1 = self.register_user("user1", "password") @@ -41,7 +45,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.user2 = self.register_user("user2", "password") self.token2 = self.login("user2", "password") - def test_single_public_joined_room(self): + def test_single_public_joined_room(self) -> None: """Test that we write *all* events for a public room""" room_id = self.helper.create_room_as( self.user1, tok=self.token1, is_public=True @@ -74,7 +78,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) self.assertEqual(counter[(EventTypes.Member, self.user2)], 1) - def test_single_private_joined_room(self): + def test_single_private_joined_room(self) -> None: """Tests that we correctly write state when we can't see all events in a room. """ @@ -112,7 +116,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) self.assertEqual(counter[(EventTypes.Member, self.user2)], 1) - def test_single_left_room(self): + def test_single_left_room(self) -> None: """Tests that we don't see events in the room after we leave.""" room_id = self.helper.create_room_as(self.user1, tok=self.token1) self.helper.send(room_id, body="Hello!", tok=self.token1) @@ -144,7 +148,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) self.assertEqual(counter[(EventTypes.Member, self.user2)], 2) - def test_single_left_rejoined_private_room(self): + def test_single_left_rejoined_private_room(self) -> None: """Tests that see the correct events in private rooms when we repeatedly join and leave. 
""" @@ -185,7 +189,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) self.assertEqual(counter[(EventTypes.Member, self.user2)], 3) - def test_invite(self): + def test_invite(self) -> None: """Tests that pending invites get handled correctly.""" room_id = self.helper.create_room_as(self.user1, tok=self.token1) self.helper.send(room_id, body="Hello!", tok=self.token1) @@ -204,7 +208,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): self.assertEqual(args[1].content["membership"], "invite") self.assertTrue(args[2]) # Assert there is at least one bit of state - def test_knock(self): + def test_knock(self) -> None: """Tests that knock get handled correctly.""" # create a knockable v7 room room_id = self.helper.create_room_as( diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 0c6e55e725..67a7829769 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -15,8 +15,12 @@ from unittest.mock import Mock import pymacaroons +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import AuthError, ResourceLimitError from synapse.rest import admin +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -27,7 +31,7 @@ class AuthTestCase(unittest.HomeserverTestCase): admin.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.auth_handler = hs.get_auth_handler() self.macaroon_generator = hs.get_macaroon_generator() @@ -42,23 +46,23 @@ class AuthTestCase(unittest.HomeserverTestCase): self.user1 = self.register_user("a_user", "pass") - def test_macaroon_caveats(self): + def test_macaroon_caveats(self) -> None: token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) - def verify_gen(caveat): + def verify_gen(caveat: str) -> bool: return caveat == "gen = 1" - def verify_user(caveat): + def verify_user(caveat: str) -> bool: return caveat == "user_id = a_user" - def verify_type(caveat): + def verify_type(caveat: str) -> bool: return caveat == "type = access" - def verify_nonce(caveat): + def verify_nonce(caveat: str) -> bool: return caveat.startswith("nonce =") - def verify_guest(caveat): + def verify_guest(caveat: str) -> bool: return caveat == "guest = true" v = pymacaroons.Verifier() @@ -69,7 +73,7 @@ class AuthTestCase(unittest.HomeserverTestCase): v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.key.macaroon_secret_key) - def test_short_term_login_token_gives_user_id(self): + def test_short_term_login_token_gives_user_id(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) @@ -84,7 +88,7 @@ class AuthTestCase(unittest.HomeserverTestCase): AuthError, ) - def test_short_term_login_token_gives_auth_provider(self): + def test_short_term_login_token_gives_auth_provider(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, auth_provider_id="my_idp" ) @@ -92,7 +96,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(self.user1, res.user_id) self.assertEqual("my_idp", res.auth_provider_id) - def test_short_term_login_token_cannot_replace_user_id(self): + def test_short_term_login_token_cannot_replace_user_id(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( 
self.user1, "", duration_in_ms=5000 ) @@ -112,7 +116,7 @@ class AuthTestCase(unittest.HomeserverTestCase): AuthError, ) - def test_mau_limits_disabled(self): + def test_mau_limits_disabled(self) -> None: self.auth_blocking._limit_usage_by_mau = False # Ensure does not throw exception self.get_success( @@ -127,7 +131,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) ) - def test_mau_limits_exceeded_large(self): + def test_mau_limits_exceeded_large(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastores().main.get_monthly_active_count = Mock( return_value=make_awaitable(self.large_number_of_users) @@ -150,7 +154,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ResourceLimitError, ) - def test_mau_limits_parity(self): + def test_mau_limits_parity(self) -> None: # Ensure we're not at the unix epoch. self.reactor.advance(1) self.auth_blocking._limit_usage_by_mau = True @@ -189,7 +193,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) ) - def test_mau_limits_not_exceeded(self): + def test_mau_limits_not_exceeded(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastores().main.get_monthly_active_count = Mock( @@ -211,7 +215,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) ) - def _get_macaroon(self): + def _get_macaroon(self) -> pymacaroons.Macaroon: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index ddda36c5a9..3a10791226 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -39,7 +39,7 @@ class DeactivateAccountTestCase(HomeserverTestCase): self.user = self.register_user("user", "pass") self.token = self.login("user", "pass") - def _deactivate_my_account(self): + def _deactivate_my_account(self) -> None: """ Deactivates the account `self.user` using `self.token` and asserts that it returns a 200 success code. diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 683677fd07..01ea7d2a42 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -14,9 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import synapse.api.errors -import synapse.handlers.device -import synapse.storage +from typing import Optional + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import NotFoundError, SynapseError +from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -25,28 +30,27 @@ user2 = "@theresa:bbb" class DeviceTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver("server", federation_http_client=None) self.handler = hs.get_device_handler() self.store = hs.get_datastores().main return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # These tests assume that it starts 1000 seconds in. 
self.reactor.advance(1000) - def test_device_is_created_with_invalid_name(self): + def test_device_is_created_with_invalid_name(self) -> None: self.get_failure( self.handler.check_device_registered( user_id="@boris:foo", device_id="foo", - initial_device_display_name="a" - * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1), + initial_device_display_name="a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1), ), - synapse.api.errors.SynapseError, + SynapseError, ) - def test_device_is_created_if_doesnt_exist(self): + def test_device_is_created_if_doesnt_exist(self) -> None: res = self.get_success( self.handler.check_device_registered( user_id="@boris:foo", @@ -59,7 +63,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - def test_device_is_preserved_if_exists(self): + def test_device_is_preserved_if_exists(self) -> None: res1 = self.get_success( self.handler.check_device_registered( user_id="@boris:foo", @@ -81,7 +85,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - def test_device_id_is_made_up_if_unspecified(self): + def test_device_id_is_made_up_if_unspecified(self) -> None: device_id = self.get_success( self.handler.check_device_registered( user_id="@theresa:foo", @@ -93,7 +97,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id)) self.assertEqual(dev["display_name"], "display") - def test_get_devices_by_user(self): + def test_get_devices_by_user(self) -> None: self._record_users() res = self.get_success(self.handler.get_devices_by_user(user1)) @@ -131,7 +135,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): device_map["abc"], ) - def test_get_device(self): + def test_get_device(self) -> None: self._record_users() res = self.get_success(self.handler.get_device(user1, "abc")) @@ -146,21 +150,19 @@ class DeviceTestCase(unittest.HomeserverTestCase): res, ) - def test_delete_device(self): + def test_delete_device(self) -> None: self._record_users() # delete the device self.get_success(self.handler.delete_device(user1, "abc")) # check the device was deleted - self.get_failure( - self.handler.get_device(user1, "abc"), synapse.api.errors.NotFoundError - ) + self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError) # we'd like to check the access token was invalidated, but that's a # bit of a PITA. - def test_delete_device_and_device_inbox(self): + def test_delete_device_and_device_inbox(self) -> None: self._record_users() # add an device_inbox @@ -191,7 +193,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) self.assertIsNone(res) - def test_update_device(self): + def test_update_device(self) -> None: self._record_users() update = {"display_name": "new display"} @@ -200,32 +202,29 @@ class DeviceTestCase(unittest.HomeserverTestCase): res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") - def test_update_device_too_long_display_name(self): + def test_update_device_too_long_display_name(self) -> None: """Update a device with a display name that is invalid (too long).""" self._record_users() # Request to update a device display name with a new value that is longer than allowed. 
- update = { - "display_name": "a" - * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) - } + update = {"display_name": "a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1)} self.get_failure( self.handler.update_device(user1, "abc", update), - synapse.api.errors.SynapseError, + SynapseError, ) # Ensure the display name was not updated. res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "display 2") - def test_update_unknown_device(self): + def test_update_unknown_device(self) -> None: update = {"display_name": "new_display"} self.get_failure( self.handler.update_device("user_id", "unknown_device_id", update), - synapse.api.errors.NotFoundError, + NotFoundError, ) - def _record_users(self): + def _record_users(self) -> None: # check this works for both devices which have a recorded client_ip, # and those which don't. self._record_user(user1, "xyz", "display 0") @@ -238,8 +237,13 @@ class DeviceTestCase(unittest.HomeserverTestCase): self.reactor.advance(10000) def _record_user( - self, user_id, device_id, display_name, access_token=None, ip=None - ): + self, + user_id: str, + device_id: str, + display_name: str, + access_token: Optional[str] = None, + ip: Optional[str] = None, + ) -> None: device_id = self.get_success( self.handler.check_device_registered( user_id=user_id, @@ -248,7 +252,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) ) - if ip is not None: + if access_token is not None and ip is not None: self.get_success( self.store.insert_client_ip( user_id, access_token, ip, "user_agent", device_id @@ -258,7 +262,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): class DehydrationTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver("server", federation_http_client=None) self.handler = hs.get_device_handler() self.registration = hs.get_registration_handler() @@ -266,7 +270,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main return hs - def test_dehydrate_and_rehydrate_device(self): + def test_dehydrate_and_rehydrate_device(self) -> None: user_id = "@boris:dehydration" self.get_success(self.store.register_user(user_id, "foobar")) @@ -303,7 +307,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase): access_token=access_token, device_id="not the right device ID", ), - synapse.api.errors.NotFoundError, + NotFoundError, ) # dehydrating the right devices should succeed and change our device ID @@ -331,7 +335,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase): # make sure that the device ID that we were initially assigned no longer exists self.get_failure( self.handler.get_device(user_id, device_id), - synapse.api.errors.NotFoundError, + NotFoundError, ) # make sure that there's no device available for dehydrating now -- cgit 1.5.1 From 32c828d0f760492711a98b11376e229d795fd1b3 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 11 Mar 2022 13:42:22 +0100 Subject: Add type hints to `tests/rest`. 
(#12208) Co-authored-by: Patrick Cloke --- changelog.d/12208.misc | 1 + mypy.ini | 1 - tests/rest/client/test_transactions.py | 19 +++++- tests/rest/media/v1/test_media_storage.py | 110 ++++++++++++++++++------------ tests/rest/media/v1/test_url_preview.py | 83 +++++++++++----------- 5 files changed, 129 insertions(+), 85 deletions(-) create mode 100644 changelog.d/12208.misc diff --git a/changelog.d/12208.misc b/changelog.d/12208.misc new file mode 100644 index 0000000000..c5b6356799 --- /dev/null +++ b/changelog.d/12208.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index c8390ddba9..f9c39fcaae 100644 --- a/mypy.ini +++ b/mypy.ini @@ -90,7 +90,6 @@ exclude = (?x) |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py |tests/rest/media/v1/test_media_storage.py - |tests/rest/media/v1/test_url_preview.py |tests/scripts/test_new_matrix_user.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 3b5747cb12..8d8251b2ac 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,3 +1,18 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from http import HTTPStatus from unittest.mock import Mock, call from twisted.internet import defer, reactor @@ -11,14 +26,14 @@ from tests.utils import MockClock class HttpTransactionCacheTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() self.hs = Mock() self.hs.get_clock = Mock(return_value=self.clock) self.hs.get_auth = Mock() self.cache = HttpTransactionCache(self.hs) - self.mock_http_response = (200, "GOOD JOB!") + self.mock_http_response = (HTTPStatus.OK, "GOOD JOB!") self.mock_key = "foo" @defer.inlineCallbacks diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index cba9be17c4..7204b2dfe0 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -16,7 +16,7 @@ import shutil import tempfile from binascii import unhexlify from io import BytesIO -from typing import Optional +from typing import Any, BinaryIO, Dict, List, Optional, Union from unittest.mock import Mock from urllib import parse @@ -26,18 +26,24 @@ from PIL import Image as Image from twisted.internet import defer from twisted.internet.defer import Deferred +from twisted.test.proto_helpers import MemoryReactor +from synapse.events import EventBase from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.logging.context import make_deferred_yieldable +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.filepath import MediaFilePaths -from synapse.rest.media.v1.media_storage import MediaStorage +from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend +from synapse.server import HomeServer +from synapse.types import RoomAlias +from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request +from tests.server import FakeChannel, FakeSite, make_request from tests.test_utils import SMALL_PNG from tests.utils import default_config @@ -46,7 +52,7 @@ class MediaStorageTests(unittest.HomeserverTestCase): needs_threadpool = True - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") self.addCleanup(shutil.rmtree, self.test_dir) @@ -62,7 +68,7 @@ class MediaStorageTests(unittest.HomeserverTestCase): hs, self.primary_base_path, self.filepaths, storage_providers ) - def test_ensure_media_is_in_local_cache(self): + def test_ensure_media_is_in_local_cache(self) -> None: media_id = "some_media_id" test_body = "Test\n" @@ -105,7 +111,7 @@ class MediaStorageTests(unittest.HomeserverTestCase): self.assertEqual(test_body, body) -@attr.s(slots=True, frozen=True) +@attr.s(auto_attribs=True, slots=True, frozen=True) class _TestImage: """An image for testing thumbnailing with the expected results @@ -121,18 +127,18 @@ class _TestImage: a 404 is expected. 
""" - data = attr.ib(type=bytes) - content_type = attr.ib(type=bytes) - extension = attr.ib(type=bytes) - expected_cropped = attr.ib(type=Optional[bytes], default=None) - expected_scaled = attr.ib(type=Optional[bytes], default=None) - expected_found = attr.ib(default=True, type=bool) + data: bytes + content_type: bytes + extension: bytes + expected_cropped: Optional[bytes] = None + expected_scaled: Optional[bytes] = None + expected_found: bool = True @parameterized_class( ("test_image",), [ - # smoll png + # small png ( _TestImage( SMALL_PNG, @@ -193,11 +199,17 @@ class MediaRepoTests(unittest.HomeserverTestCase): hijack_auth = True user_id = "@test:user" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.fetches = [] - def get_file(destination, path, output_stream, args=None, max_size=None): + def get_file( + destination: str, + path: str, + output_stream: BinaryIO, + args: Optional[Dict[str, Union[str, List[str]]]] = None, + max_size: Optional[int] = None, + ) -> Deferred: """ Returns tuple[int,dict,str,int] of file length, response headers, absolute URI, and response code. @@ -238,7 +250,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: media_resource = hs.get_media_repository_resource() self.download_resource = media_resource.children[b"download"] @@ -248,8 +260,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.media_id = "example.com/12345" - def _req(self, content_disposition, include_content_type=True): - + def _req( + self, content_disposition: Optional[bytes], include_content_type: bool = True + ) -> FakeChannel: channel = make_request( self.reactor, FakeSite(self.download_resource, self.reactor), @@ -288,7 +301,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): return channel - def test_handle_missing_content_type(self): + def test_handle_missing_content_type(self) -> None: channel = self._req( b"inline; filename=out" + self.test_image.extension, include_content_type=False, @@ -299,7 +312,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"] ) - def test_disposition_filename_ascii(self): + def test_disposition_filename_ascii(self) -> None: """ If the filename is filename= then Synapse will decode it as an ASCII string, and use filename= in the response. @@ -315,7 +328,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"inline; filename=out" + self.test_image.extension], ) - def test_disposition_filenamestar_utf8escaped(self): + def test_disposition_filenamestar_utf8escaped(self) -> None: """ If the filename is filename=*utf8'' then Synapse will correctly decode it as the UTF-8 string, and use filename* in the @@ -335,7 +348,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"inline; filename*=utf-8''" + filename + self.test_image.extension], ) - def test_disposition_none(self): + def test_disposition_none(self) -> None: """ If there is no filename, one isn't passed on in the Content-Disposition of the request. 
@@ -348,26 +361,26 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) - def test_thumbnail_crop(self): + def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( "crop", self.test_image.expected_cropped, self.test_image.expected_found ) - def test_thumbnail_scale(self): + def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( "scale", self.test_image.expected_scaled, self.test_image.expected_found ) - def test_invalid_type(self): + def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" self._test_thumbnail("invalid", None, False) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} ) - def test_no_thumbnail_crop(self): + def test_no_thumbnail_crop(self) -> None: """ Override the config to generate only scaled thumbnails, but request a cropped one. """ @@ -376,13 +389,13 @@ class MediaRepoTests(unittest.HomeserverTestCase): @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} ) - def test_no_thumbnail_scale(self): + def test_no_thumbnail_scale(self) -> None: """ Override the config to generate only cropped thumbnails, but request a scaled one. """ self._test_thumbnail("scale", None, False) - def test_thumbnail_repeated_thumbnail(self): + def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. """ @@ -443,7 +456,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.result["body"], ) - def _test_thumbnail(self, method, expected_body, expected_found): + def _test_thumbnail( + self, method: str, expected_body: Optional[bytes], expected_found: bool + ) -> None: params = "?width=32&height=32&method=" + method channel = make_request( self.reactor, @@ -485,7 +500,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)]) - def test_same_quality(self, method, desired_size): + def test_same_quality(self, method: str, desired_size: int) -> None: """Test that choosing between thumbnails with the same quality rating succeeds. We are not particular about which thumbnail is chosen.""" @@ -521,7 +536,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) ) - def test_x_robots_tag_header(self): + def test_x_robots_tag_header(self) -> None: """ Tests that the `X-Robots-Tag` header is present, which informs web crawlers to not index, archive, or follow links in media. @@ -540,29 +555,38 @@ class TestSpamChecker: `evil`. 
""" - def __init__(self, config, api): + def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: self.config = config self.api = api - def parse_config(config): + def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config - async def check_event_for_spam(self, foo): + async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]: return False # allow all events - async def user_may_invite(self, inviter_userid, invitee_userid, room_id): + async def user_may_invite( + self, + inviter_userid: str, + invitee_userid: str, + room_id: str, + ) -> bool: return True # allow all invites - async def user_may_create_room(self, userid): + async def user_may_create_room(self, userid: str) -> bool: return True # allow all room creations - async def user_may_create_room_alias(self, userid, room_alias): + async def user_may_create_room_alias( + self, userid: str, room_alias: RoomAlias + ) -> bool: return True # allow all room aliases - async def user_may_publish_room(self, userid, room_id): + async def user_may_publish_room(self, userid: str, room_id: str) -> bool: return True # allow publishing of all rooms - async def check_media_file_for_spam(self, file_wrapper, file_info) -> bool: + async def check_media_file_for_spam( + self, file_wrapper: ReadableFileWrapper, file_info: FileInfo + ) -> bool: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) @@ -575,7 +599,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): admin.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") @@ -586,7 +610,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): load_legacy_spam_checkers(hs) - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = default_config("test") config.update( @@ -602,13 +626,13 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): return config - def test_upload_innocent(self): + def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" self.helper.upload_media( self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 ) - def test_upload_ban(self): + def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should get rejected by the spam checker. 
""" diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index da2c533260..5148c39874 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -16,16 +16,21 @@ import base64 import json import os import re +from typing import Any, Dict, Optional, Sequence, Tuple, Type from urllib.parse import urlencode from twisted.internet._resolver import HostResolution from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.error import DNSLookupError -from twisted.test.proto_helpers import AccumulatingProtocol +from twisted.internet.interfaces import IAddress, IResolutionReceiver +from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from synapse.config.oembed import OEmbedEndpointConfig +from synapse.rest.media.v1.media_repository import MediaRepositoryResource from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS +from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util import Clock from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest @@ -52,7 +57,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): b"" ) - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["url_preview_enabled"] = True @@ -113,22 +118,22 @@ class URLPreviewTests(unittest.HomeserverTestCase): return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.media_repo = hs.get_media_repository_resource() self.preview_url = self.media_repo.children[b"preview_url"] - self.lookups = {} + self.lookups: Dict[str, Any] = {} class Resolver: def resolveHostName( _self, - resolutionReceiver, - hostName, - portNumber=0, - addressTypes=None, - transportSemantics="TCP", - ): + resolutionReceiver: IResolutionReceiver, + hostName: str, + portNumber: int = 0, + addressTypes: Optional[Sequence[Type[IAddress]]] = None, + transportSemantics: str = "TCP", + ) -> IResolutionReceiver: resolution = HostResolution(hostName) resolutionReceiver.resolutionBegan(resolution) @@ -140,9 +145,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): resolutionReceiver.resolutionComplete() return resolutionReceiver - self.reactor.nameResolver = Resolver() + self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_test_resource(self): + def create_test_resource(self) -> MediaRepositoryResource: return self.hs.get_media_repository_resource() def _assert_small_png(self, json_body: JsonDict) -> None: @@ -153,7 +158,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(json_body["og:image:type"], "image/png") self.assertEqual(json_body["matrix:image:size"], 67) - def test_cache_returns_correct_type(self): + def test_cache_returns_correct_type(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] channel = self.make_request( @@ -207,7 +212,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_non_ascii_preview_httpequiv(self): + def test_non_ascii_preview_httpequiv(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -243,7 +248,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["og:title"], 
"\u0434\u043a\u0430") - def test_video_rejected(self): + def test_video_rejected(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = b"anything" @@ -279,7 +284,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_audio_rejected(self): + def test_audio_rejected(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = b"anything" @@ -315,7 +320,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_non_ascii_preview_content_type(self): + def test_non_ascii_preview_content_type(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -350,7 +355,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430") - def test_overlong_title(self): + def test_overlong_title(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -387,7 +392,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # We should only see the `og:description` field, as `title` is too long and should be stripped out self.assertCountEqual(["og:description"], res.keys()) - def test_ipaddr(self): + def test_ipaddr(self) -> None: """ IP addresses can be previewed directly. """ @@ -417,7 +422,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_specific(self): + def test_blacklisted_ip_specific(self) -> None: """ Blacklisted IP addresses, found via DNS, are not spidered. """ @@ -438,7 +443,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_range(self): + def test_blacklisted_ip_range(self) -> None: """ Blacklisted IP ranges, IPs found over DNS, are not spidered. """ @@ -457,7 +462,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_specific_direct(self): + def test_blacklisted_ip_specific_direct(self) -> None: """ Blacklisted IP addresses, accessed directly, are not spidered. """ @@ -476,7 +481,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 403) - def test_blacklisted_ip_range_direct(self): + def test_blacklisted_ip_range_direct(self) -> None: """ Blacklisted IP ranges, accessed directly, are not spidered. """ @@ -493,7 +498,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_range_whitelisted_ip(self): + def test_blacklisted_ip_range_whitelisted_ip(self) -> None: """ Blacklisted but then subsequently whitelisted IP addresses can be spidered. @@ -526,7 +531,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_with_external_ip(self): + def test_blacklisted_ip_with_external_ip(self) -> None: """ If a hostname resolves a blacklisted IP, even if there's a non-blacklisted one, it will be rejected. @@ -549,7 +554,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_specific(self): + def test_blacklisted_ipv6_specific(self) -> None: """ Blacklisted IP addresses, found via DNS, are not spidered. """ @@ -572,7 +577,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_range(self): + def test_blacklisted_ipv6_range(self) -> None: """ Blacklisted IP ranges, IPs found over DNS, are not spidered. 
""" @@ -591,7 +596,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_OPTIONS(self): + def test_OPTIONS(self) -> None: """ OPTIONS returns the OPTIONS. """ @@ -601,7 +606,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {}) - def test_accept_language_config_option(self): + def test_accept_language_config_option(self) -> None: """ Accept-Language header is sent to the remote server """ @@ -652,7 +657,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): server.data, ) - def test_data_url(self): + def test_data_url(self) -> None: """ Requesting to preview a data URL is not supported. """ @@ -675,7 +680,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 500) - def test_inline_data_url(self): + def test_inline_data_url(self) -> None: """ An inline image (as a data URL) should be parsed properly. """ @@ -712,7 +717,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self._assert_small_png(channel.json_body) - def test_oembed_photo(self): + def test_oembed_photo(self) -> None: """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL.""" self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")] self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")] @@ -771,7 +776,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345") self._assert_small_png(body) - def test_oembed_rich(self): + def test_oembed_rich(self) -> None: """Test an oEmbed endpoint which returns HTML content via the 'rich' type.""" self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")] @@ -817,7 +822,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_oembed_format(self): + def test_oembed_format(self) -> None: """Test an oEmbed endpoint which requires the format in the URL.""" self.lookups["www.hulu.com"] = [(IPv4Address, "10.1.2.3")] @@ -866,7 +871,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_oembed_autodiscovery(self): + def test_oembed_autodiscovery(self) -> None: """ Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL. 1. Request a preview of a URL which is not known to the oEmbed code. @@ -962,7 +967,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) self._assert_small_png(body) - def _download_image(self): + def _download_image(self) -> Tuple[str, str]: """Downloads an image into the URL cache. Returns: A (host, media_id) tuple representing the MXC URI of the image. 
@@ -995,7 +1000,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertIsNone(_port) return host, media_id - def test_storage_providers_exclude_files(self): + def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1037,7 +1042,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): "URL cache file was unexpectedly retrieved from a storage provider", ) - def test_storage_providers_exclude_thumbnails(self): + def test_storage_providers_exclude_thumbnails(self) -> None: """Test that thumbnails are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1090,7 +1095,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): "URL cache thumbnail was unexpectedly retrieved from a storage provider", ) - def test_cache_expiry(self): + def test_cache_expiry(self) -> None: """Test that URL cache files and thumbnails are cleaned up properly on expiry.""" self.preview_url.clock = MockClock() -- cgit 1.5.1 From 003cc6910af177fec86ae7f43683d146975c7f4b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 11 Mar 2022 14:20:00 +0100 Subject: Update the SSO username picker template to comply with SIWA guidelines (#12210) Fixes https://github.com/matrix-org/synapse/issues/12205 --- changelog.d/12210.misc | 1 + docs/sample_config.yaml | 9 +++++++-- docs/templates.md | 7 +++++-- synapse/config/oidc.py | 9 +++++++-- synapse/handlers/oidc.py | 12 +++++++++++- synapse/handlers/sso.py | 8 +++++--- synapse/res/templates/sso_auth_account_details.html | 6 +++--- synapse/rest/synapse/client/pick_username.py | 8 ++++++++ 8 files changed, 47 insertions(+), 13 deletions(-) create mode 100644 changelog.d/12210.misc diff --git a/changelog.d/12210.misc b/changelog.d/12210.misc new file mode 100644 index 0000000000..3f6a8747c2 --- /dev/null +++ b/changelog.d/12210.misc @@ -0,0 +1 @@ +Update the SSO username picker template to comply with SIWA guidelines. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6f3623c88a..ef25a3175f 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1947,8 +1947,13 @@ saml2_config: # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their -# own username (see 'sso_auth_account_details.html' in the 'sso' -# section of this file). +# own username (see the documentation for the +# 'sso_auth_account_details.html' template). +# +# confirm_localpart: Whether to prompt the user to validate (or +# change) the generated localpart (see the documentation for the +# 'sso_auth_account_details.html' template), instead of +# registering the account right away. # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. diff --git a/docs/templates.md b/docs/templates.md index 2b66e9d862..b251d05cb9 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -176,8 +176,11 @@ Below are the templates Synapse will look for when generating pages related to S for the brand of the IdP * `user_attributes`: an object containing details about the user that we received from the IdP. 
May have the following attributes: - * display_name: the user's display_name - * emails: a list of email addresses + * `display_name`: the user's display name + * `emails`: a list of email addresses + * `localpart`: the local part of the Matrix user ID to register, + if `localpart_template` is set in the mapping provider configuration (empty + string if not) The template should render a form which submits the following fields: * `username`: the localpart of the user's chosen user id * `sso_new_user_consent.html`: HTML page allowing the user to consent to the diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index f7e4f9ef22..fc95912d9b 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -182,8 +182,13 @@ class OIDCConfig(Config): # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their - # own username (see 'sso_auth_account_details.html' in the 'sso' - # section of this file). + # own username (see the documentation for the + # 'sso_auth_account_details.html' template). + # + # confirm_localpart: Whether to prompt the user to validate (or + # change) the generated localpart (see the documentation for the + # 'sso_auth_account_details.html' template), instead of + # registering the account right away. # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 593a2aac66..d98659edc7 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -1228,6 +1228,7 @@ class OidcSessionData: class UserAttributeDict(TypedDict): localpart: Optional[str] + confirm_localpart: bool display_name: Optional[str] emails: List[str] @@ -1316,6 +1317,7 @@ class JinjaOidcMappingConfig: display_name_template: Optional[Template] email_template: Optional[Template] extra_attributes: Dict[str, Template] + confirm_localpart: bool = False class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): @@ -1357,12 +1359,17 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): "invalid jinja template", path=["extra_attributes", key] ) from e + confirm_localpart = config.get("confirm_localpart") or False + if not isinstance(confirm_localpart, bool): + raise ConfigError("must be a bool", path=["confirm_localpart"]) + return JinjaOidcMappingConfig( subject_claim=subject_claim, localpart_template=localpart_template, display_name_template=display_name_template, email_template=email_template, extra_attributes=extra_attributes, + confirm_localpart=confirm_localpart, ) def get_remote_user_id(self, userinfo: UserInfo) -> str: @@ -1398,7 +1405,10 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): emails.append(email) return UserAttributeDict( - localpart=localpart, display_name=display_name, emails=emails + localpart=localpart, + display_name=display_name, + emails=emails, + confirm_localpart=self._config.confirm_localpart, ) async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index ff5b5169ca..4f02a060d9 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -132,6 +132,7 @@ class UserAttributes: # if `None`, the mapper has not picked a userid, and the user should be prompted to # enter one. 
localpart: Optional[str] + confirm_localpart: bool = False display_name: Optional[str] = None emails: Collection[str] = attr.Factory(list) @@ -561,9 +562,10 @@ class SsoHandler: # Must provide either attributes or session, not both assert (attributes is not None) != (session is not None) - if (attributes and attributes.localpart is None) or ( - session and session.chosen_localpart is None - ): + if ( + attributes + and (attributes.localpart is None or attributes.confirm_localpart is True) + ) or (session and session.chosen_localpart is None): return b"/_synapse/client/pick_username/account_details" elif self._consent_at_registration and not ( session and session.terms_accepted_version diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 00e1dcdbb8..41315e4fd4 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -130,15 +130,15 @@
-      <h1>Your account is nearly ready</h1>
-      <p>Check your details before creating an account on {{ server_name }}</p>
+      <h1>Choose your user name</h1>
+      <p>This is required to create your account on {{ server_name }}, and you can't change this later.</p>
       <form method="post" class="form__input" id="form">
         <div class="prefix">@</div>
-        <input type="text" name="username" id="field-username" autofocus>
+        <input type="text" name="username" id="field-username" value="{{ user_attributes.localpart }}" autofocus>
         <div class="postfix">:{{ server_name }}</div>
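Taken together, the template and handler changes let a homeserver generate a localpart from IdP claims while still sending the user through the username picker to confirm it. A hedged sketch of what enabling that might look like in the homeserver config, using placeholder provider details (the option names are the ones documented in the sample config above):

```yaml
oidc_providers:
  - idp_id: example_idp                  # placeholder identity provider
    issuer: "https://idp.example.com/"
    client_id: "synapse-client"
    client_secret: "verysecret"
    user_mapping_provider:
      config:
        # Generate a localpart from the IdP's preferred_username claim...
        localpart_template: "{{ user.preferred_username }}"
        # ...but ask the user to confirm (or change) it on the account
        # details page instead of registering the account right away.
        confirm_localpart: true
```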
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py index 28ae083497..6338fbaaa9 100644 --- a/synapse/rest/synapse/client/pick_username.py +++ b/synapse/rest/synapse/client/pick_username.py @@ -92,12 +92,20 @@ class AccountDetailsResource(DirectServeHtmlResource): self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code) return + # The configuration might mandate going through this step to validate an + # automatically generated localpart, so session.chosen_localpart might already + # be set. + localpart = "" + if session.chosen_localpart is not None: + localpart = session.chosen_localpart + idp_id = session.auth_provider_id template_params = { "idp": self._sso_handler.get_identity_providers()[idp_id], "user_attributes": { "display_name": session.display_name, "emails": session.emails, + "localpart": localpart, }, } -- cgit 1.5.1 From 735e89bd3a0755883ef0a19649adf84192b5d9fc Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 11 Mar 2022 14:45:26 +0100 Subject: Add an additional HTTP pusher + push rule tests. (#12188) And rename the field used for caching from _id to _cache_key. --- changelog.d/12188.misc | 1 + synapse/push/baserules.py | 38 ++++++++--------- synapse/push/bulk_push_rule_evaluator.py | 10 ++--- synapse/push/clientformat.py | 2 +- tests/push/test_http.py | 72 ++++++++++++++++++++++++++++++-- 5 files changed, 95 insertions(+), 28 deletions(-) create mode 100644 changelog.d/12188.misc diff --git a/changelog.d/12188.misc b/changelog.d/12188.misc new file mode 100644 index 0000000000..403158481c --- /dev/null +++ b/changelog.d/12188.misc @@ -0,0 +1 @@ +Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 832eaa34e9..f42f605f23 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -169,7 +169,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "content.msgtype", "pattern": "m.notice", - "_id": "_suppress_notices", + "_cache_key": "_suppress_notices", } ], "actions": ["dont_notify"], @@ -183,13 +183,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.room.member", - "_id": "_member", + "_cache_key": "_member", }, { "kind": "event_match", "key": "content.membership", "pattern": "invite", - "_id": "_invite_member", + "_cache_key": "_invite_member", }, {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"}, ], @@ -212,7 +212,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.room.member", - "_id": "_member", + "_cache_key": "_member", } ], "actions": ["dont_notify"], @@ -237,12 +237,12 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "content.body", "pattern": "@room", - "_id": "_roomnotif_content", + "_cache_key": "_roomnotif_content", }, { "kind": "sender_notification_permission", "key": "room", - "_id": "_roomnotif_pl", + "_cache_key": "_roomnotif_pl", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], @@ -254,13 +254,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.room.tombstone", - "_id": "_tombstone", + "_cache_key": "_tombstone", }, { "kind": "event_match", "key": "state_key", "pattern": "", - "_id": "_tombstone_statekey", + "_cache_key": "_tombstone_statekey", }, ], "actions": ["notify", 
{"set_tweak": "highlight", "value": True}], @@ -272,7 +272,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.reaction", - "_id": "_reaction", + "_cache_key": "_reaction", } ], "actions": ["dont_notify"], @@ -288,7 +288,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.call.invite", - "_id": "_call", + "_cache_key": "_call", } ], "actions": [ @@ -302,12 +302,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/underride/.m.rule.room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", }, ], "actions": [ @@ -321,12 +321,12 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", }, ], "actions": [ @@ -342,7 +342,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", } ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], @@ -356,7 +356,7 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", } ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], @@ -368,19 +368,19 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ "kind": "event_match", "key": "type", "pattern": "im.vector.modular.widgets", - "_id": "_type_modular_widgets", + "_cache_key": "_type_modular_widgets", }, { "kind": "event_match", "key": "content.type", "pattern": "jitsi", - "_id": "_content_type_jitsi", + "_cache_key": "_content_type_jitsi", }, { "kind": "event_match", "key": "state_key", "pattern": "*", - "_id": "_is_state_event", + "_cache_key": "_is_state_event", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index fecf86034e..8140afcb6b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -274,17 +274,17 @@ def _condition_checker( cache: Dict[str, bool], ) -> bool: for cond in conditions: - _id = cond.get("_id", None) - if _id: - res = cache.get(_id, None) + _cache_key = cond.get("_cache_key", None) + if _cache_key: + res = cache.get(_cache_key, None) if res is False: return False elif res is True: continue res = evaluator.matches(cond, uid, display_name) - if _id: - cache[_id] = bool(res) + if _cache_key: + cache[_cache_key] = bool(res) if not res: return False diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index c5708cd888..63b22d50ae 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -40,7 +40,7 @@ def format_push_rules_for_user( # Remove internal stuff. 
for c in r["conditions"]: - c.pop("_id", None) + c.pop("_cache_key", None) pattern_type = c.pop("pattern_type", None) if pattern_type == "user_id": diff --git a/tests/push/test_http.py b/tests/push/test_http.py index c284beb37c..6691e07128 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import List, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred @@ -18,7 +19,7 @@ from twisted.internet.defer import Deferred import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable from synapse.push import PusherConfigException -from synapse.rest.client import login, receipts, room +from synapse.rest.client import login, push_rule, receipts, room from tests.unittest import HomeserverTestCase, override_config @@ -29,6 +30,7 @@ class HTTPPusherTests(HomeserverTestCase): room.register_servlets, login.register_servlets, receipts.register_servlets, + push_rule.register_servlets, ] user_id = True hijack_auth = False @@ -39,12 +41,12 @@ class HTTPPusherTests(HomeserverTestCase): return config def make_homeserver(self, reactor, clock): - self.push_attempts = [] + self.push_attempts: List[tuple[Deferred, str, dict]] = [] m = Mock() def post_json_get_json(url, body): - d = Deferred() + d: Deferred = Deferred() self.push_attempts.append((d, url, body)) return make_deferred_yieldable(d) @@ -719,3 +721,67 @@ class HTTPPusherTests(HomeserverTestCase): access_token=access_token, ) self.assertEqual(channel.code, 200, channel.json_body) + + def _make_user_with_pusher(self, username: str) -> Tuple[str, str]: + user_id = self.register_user(username, "pass") + access_token = self.login(username, "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastores().main.get_user_by_access_token(access_token) + ) + token_id = user_tuple.token_id + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "http://example.com/_matrix/push/v1/notify"}, + ) + ) + + return user_id, access_token + + def test_dont_notify_rule_overrides_message(self): + """ + The override push rule will suppress notification + """ + + user_id, access_token = self._make_user_with_pusher("user") + other_user_id, other_access_token = self._make_user_with_pusher("otheruser") + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Disable user notifications for this room -> user + body = { + "conditions": [{"kind": "event_match", "key": "room_id", "pattern": room}], + "actions": ["dont_notify"], + } + channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend", + body, + access_token=access_token, + ) + self.assertEqual(channel.code, 200) + + # Check we start with no pushes + self.assertEqual(len(self.push_attempts), 0) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends a message (ignored by dont_notify push rule set above) + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 0) + + # The user sends a message back (sends a notification) + 
self.helper.send(room, body="Hello", tok=access_token) + self.assertEqual(len(self.push_attempts), 1) -- cgit 1.5.1 From 4a53f357379c2dc407617a3d39e6da4790dec9aa Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 11 Mar 2022 14:00:15 +0000 Subject: Improve code documentation for the typing stream over replication. (#12211) --- changelog.d/12211.misc | 1 + synapse/handlers/typing.py | 5 +++-- synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/resource.py | 6 +++--- synapse/replication/tcp/streams/_base.py | 12 ++++++++++++ 5 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12211.misc diff --git a/changelog.d/12211.misc b/changelog.d/12211.misc new file mode 100644 index 0000000000..d11634a1ee --- /dev/null +++ b/changelog.d/12211.misc @@ -0,0 +1 @@ +Improve code documentation for the typing stream over replication. \ No newline at end of file diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 3b89126528..6854428b7c 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -160,8 +160,9 @@ class FollowerTypingHandler: """Should be called whenever we receive updates for typing stream.""" if self._latest_room_serial > token: - # The master has gone backwards. To prevent inconsistent data, just - # clear everything. + # The typing worker has gone backwards (e.g. it may have restarted). + # To prevent inconsistent data, just clear everything. + logger.info("Typing handler stream went backwards; resetting") self._reset() # Set the latest serial token to whatever the server gave us. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d51f045f22..b217c35f99 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -709,7 +709,7 @@ class ReplicationCommandHandler: self.send_command(RemoteServerUpCommand(server)) def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None: - """Called when a new update is available to stream to clients. + """Called when a new update is available to stream to Redis subscribers. We need to check if the client is interested in the stream or not """ diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ab829040cd..c6870df8f9 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -67,8 +67,8 @@ class ReplicationStreamProtocolFactory(ServerFactory): class ReplicationStreamer: """Handles replication connections. - This needs to be poked when new replication data may be available. When new - data is available it will propagate to all connected clients. + This needs to be poked when new replication data may be available. + When new data is available it will propagate to all Redis subscribers. """ def __init__(self, hs: "HomeServer"): @@ -109,7 +109,7 @@ class ReplicationStreamer: def on_notifier_poke(self) -> None: """Checks if there is actually any new data and sends it to the - connections if there are. + Redis subscribers if there are. 
This should get called each time new data is available, even if it is currently being executed, so that nothing gets missed diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 23d631a769..495f2f0285 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -316,7 +316,19 @@ class PresenceFederationStream(Stream): class TypingStream(Stream): @attr.s(slots=True, frozen=True, auto_attribs=True) class TypingStreamRow: + """ + An entry in the typing stream. + Describes all the users that are 'typing' right now in one room. + + When a user stops typing, it will be streamed as a new update with that + user absent; you can think of the `user_ids` list as overwriting the + entire list that was there previously. + """ + + # The room that this update is for. room_id: str + + # All the users that are 'typing' right now in the specified room. user_ids: List[str] NAME = "typing" -- cgit 1.5.1 From e6a106fd5ebbf30a7c84f8ba09dc903d20213be3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 11 Mar 2022 16:15:11 +0100 Subject: Implement a Jinja2 filter to extract localparts from email addresses (#12212) --- changelog.d/12212.feature | 1 + docs/sample_config.yaml | 3 ++- docs/templates.md | 7 +++++++ synapse/config/oidc.py | 3 ++- synapse/handlers/oidc.py | 6 ++++++ synapse/util/templates.py | 5 +++++ 6 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12212.feature diff --git a/changelog.d/12212.feature b/changelog.d/12212.feature new file mode 100644 index 0000000000..fe337ff990 --- /dev/null +++ b/changelog.d/12212.feature @@ -0,0 +1 @@ +Add a new Jinja2 template filter to extract the local part of an email address. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ef25a3175f..d634fd8ff5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1948,7 +1948,8 @@ saml2_config: # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see the documentation for the -# 'sso_auth_account_details.html' template). +# 'sso_auth_account_details.html' template). This template can +# use the 'localpart_from_email' filter. # # confirm_localpart: Whether to prompt the user to validate (or # change) the generated localpart (see the documentation for the diff --git a/docs/templates.md b/docs/templates.md index b251d05cb9..f87692a453 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver' Example: `message.sender_avatar_url|mxc_to_http(32,32)` +```python +localpart_from_email(address: str) -> str +``` + +Returns the local part of an email address (e.g. `alice` in `alice@example.com`). + +Example: `user.email_address|localpart_from_email` ## Email templates diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index fc95912d9b..5d571651cb 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -183,7 +183,8 @@ class OIDCConfig(Config): # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see the documentation for the - # 'sso_auth_account_details.html' template). + # 'sso_auth_account_details.html' template). This template can + # use the 'localpart_from_email' filter. 
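To illustrate the new filter end to end, here is a minimal sketch using plain Jinja2; the `user.email` attribute is an invented example, not a field Synapse guarantees in its template context:

```python
from jinja2 import Environment


def localpart_from_email(address: str) -> str:
    # Mirrors synapse/util/templates.py: keep everything before the last "@".
    return address.rsplit("@", 1)[0]


env = Environment()
env.filters["localpart_from_email"] = localpart_from_email

template = env.from_string("{{ user.email | localpart_from_email }}")
assert template.render(user={"email": "alice@example.com"}) == "alice"
```

Using `rsplit` rather than `split` means a malformed-but-conceivable address such as `a@b@example.com` yields `a@b`, i.e. the whole local part.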
# # confirm_localpart: Whether to prompt the user to validate (or # change) the generated localpart (see the documentation for the diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index d98659edc7..724b9cfcb4 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -45,6 +45,7 @@ from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart from synapse.util import Clock, json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry +from synapse.util.templates import _localpart_from_email_filter if TYPE_CHECKING: from synapse.server import HomeServer @@ -1308,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any: env = Environment(finalize=jinja_finalize) +env.filters.update( + { + "localpart_from_email": _localpart_from_email_filter, + } +) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/util/templates.py b/synapse/util/templates.py index 12941065ca..fb758b7180 100644 --- a/synapse/util/templates.py +++ b/synapse/util/templates.py @@ -64,6 +64,7 @@ def build_jinja_env( { "format_ts": _format_ts_filter, "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl), + "localpart_from_email": _localpart_from_email_filter, } ) @@ -112,3 +113,7 @@ def _create_mxc_to_http_filter( def _format_ts_filter(value: int, format: str) -> str: return time.strftime(format, time.localtime(value / 1000)) + + +def _localpart_from_email_filter(address: str) -> str: + return address.rsplit("@", 1)[0] -- cgit 1.5.1 From ef3619e61d84493d98470eb2a69131d15eb1166b Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 11 Mar 2022 10:46:45 -0800 Subject: Add config settings for background update parameters (#11980) --- changelog.d/11980.misc | 1 + docs/sample_config.yaml | 32 ++++ synapse/config/_base.pyi | 2 + synapse/config/background_updates.py | 68 ++++++++ synapse/config/homeserver.py | 2 + synapse/storage/background_updates.py | 39 +++-- tests/config/test_background_update.py | 58 +++++++ tests/rest/admin/test_background_updates.py | 9 +- tests/storage/test_background_update.py | 253 ++++++++++++++++++++++++++-- 9 files changed, 430 insertions(+), 34 deletions(-) create mode 100644 changelog.d/11980.misc create mode 100644 synapse/config/background_updates.py create mode 100644 tests/config/test_background_update.py diff --git a/changelog.d/11980.misc b/changelog.d/11980.misc new file mode 100644 index 0000000000..36e992e645 --- /dev/null +++ b/changelog.d/11980.misc @@ -0,0 +1 @@ +Add config settings for background update parameters. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d634fd8ff5..36c6c56e58 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2735,3 +2735,35 @@ redis: # Optional password if configured on the Redis instance # #password: + + +## Background Updates ## + +# Background updates are database updates that are run in the background in batches. +# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to +# sleep can all be configured. This is helpful to speed up or slow down the updates. +# +background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. 
+ # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. + # + #default_batch_size: 50 diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 1eb5f5a68c..363d8b4554 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -19,6 +19,7 @@ from synapse.config import ( api, appservice, auth, + background_updates, cache, captcha, cas, @@ -113,6 +114,7 @@ class RootConfig: caches: cache.CacheConfig federation: federation.FederationConfig retention: retention.RetentionConfig + background_updates: background_updates.BackgroundUpdateConfig config_classes: List[Type["Config"]] = ... def __init__(self) -> None: ... diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py new file mode 100644 index 0000000000..f6cdeacc4b --- /dev/null +++ b/synapse/config/background_updates.py @@ -0,0 +1,68 @@ +# Copyright 2022 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config + + +class BackgroundUpdateConfig(Config): + section = "background_updates" + + def generate_config_section(self, **kwargs) -> str: + return """\ + ## Background Updates ## + + # Background updates are database updates that are run in the background in batches. + # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to + # sleep can all be configured. This is helpful to speed up or slow down the updates. + # + background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. + # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. 
+ # + #default_batch_size: 50 + """ + + def read_config(self, config, **kwargs) -> None: + bg_update_config = config.get("background_updates") or {} + + self.update_duration_ms = bg_update_config.get( + "background_update_duration_ms", 100 + ) + + self.sleep_enabled = bg_update_config.get("sleep_enabled", True) + + self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000) + + self.min_batch_size = bg_update_config.get("min_batch_size", 1) + + self.default_batch_size = bg_update_config.get("default_batch_size", 100) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 001605c265..a4ec706908 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -16,6 +16,7 @@ from .account_validity import AccountValidityConfig from .api import ApiConfig from .appservice import AppServiceConfig from .auth import AuthConfig +from .background_updates import BackgroundUpdateConfig from .cache import CacheConfig from .captcha import CaptchaConfig from .cas import CasConfig @@ -99,4 +100,5 @@ class HomeServerConfig(RootConfig): WorkerConfig, RedisConfig, ExperimentalConfig, + BackgroundUpdateConfig, ] diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 4acc2c997d..08c6eabc6d 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -60,18 +60,19 @@ class _BackgroundUpdateHandler: class _BackgroundUpdateContextManager: - BACKGROUND_UPDATE_INTERVAL_MS = 1000 - BACKGROUND_UPDATE_DURATION_MS = 100 - - def __init__(self, sleep: bool, clock: Clock): + def __init__( + self, sleep: bool, clock: Clock, sleep_duration_ms: int, update_duration: int + ): self._sleep = sleep self._clock = clock + self._sleep_duration_ms = sleep_duration_ms + self._update_duration_ms = update_duration async def __aenter__(self) -> int: if self._sleep: - await self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000) + await self._clock.sleep(self._sleep_duration_ms / 1000) - return self.BACKGROUND_UPDATE_DURATION_MS + return self._update_duration_ms async def __aexit__(self, *exc) -> None: pass @@ -133,9 +134,6 @@ class BackgroundUpdater: process and autotuning the batch size. """ - MINIMUM_BACKGROUND_BATCH_SIZE = 1 - DEFAULT_BACKGROUND_BATCH_SIZE = 100 - def __init__(self, hs: "HomeServer", database: "DatabasePool"): self._clock = hs.get_clock() self.db_pool = database @@ -160,6 +158,14 @@ class BackgroundUpdater: # enable/disable background updates via the admin API. 
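How these knobs interact is easiest to see with the autotuning idea written out. This is a deliberately simplified, hypothetical sketch — the real `BackgroundUpdater` keeps a rolling average of per-item timings — with defaults matching the config options above:

```python
def next_batch_size(
    items_in_last_batch: int,
    last_batch_duration_ms: float,
    update_duration_ms: int = 100,  # background_update_duration_ms
    min_batch_size: int = 1,        # min_batch_size
) -> int:
    # Scale the batch so the next run takes roughly update_duration_ms.
    items_per_ms = items_in_last_batch / last_batch_duration_ms
    return max(int(update_duration_ms * items_per_ms), min_batch_size)


assert next_batch_size(100, 1000.0) == 10  # slow items -> smaller batches
assert next_batch_size(100, 10.0) == 1000  # fast items -> bigger batches
```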
self.enabled = True + self.minimum_background_batch_size = hs.config.background_updates.min_batch_size + self.default_background_batch_size = ( + hs.config.background_updates.default_batch_size + ) + self.update_duration_ms = hs.config.background_updates.update_duration_ms + self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms + self.sleep_enabled = hs.config.background_updates.sleep_enabled + def register_update_controller_callbacks( self, on_update: ON_UPDATE_CALLBACK, @@ -216,7 +222,9 @@ class BackgroundUpdater: if self._on_update_callback is not None: return self._on_update_callback(update_name, database_name, oneshot) - return _BackgroundUpdateContextManager(sleep, self._clock) + return _BackgroundUpdateContextManager( + sleep, self._clock, self.sleep_duration_ms, self.update_duration_ms + ) async def _default_batch_size(self, update_name: str, database_name: str) -> int: """The batch size to use for the first iteration of a new background @@ -225,7 +233,7 @@ class BackgroundUpdater: if self._default_batch_size_callback is not None: return await self._default_batch_size_callback(update_name, database_name) - return self.DEFAULT_BACKGROUND_BATCH_SIZE + return self.default_background_batch_size async def _min_batch_size(self, update_name: str, database_name: str) -> int: """A lower bound on the batch size of a new background update. @@ -235,7 +243,7 @@ class BackgroundUpdater: if self._min_batch_size_callback is not None: return await self._min_batch_size_callback(update_name, database_name) - return self.MINIMUM_BACKGROUND_BATCH_SIZE + return self.minimum_background_batch_size def get_current_update(self) -> Optional[BackgroundUpdatePerformance]: """Returns the current background update, if any.""" @@ -254,9 +262,12 @@ class BackgroundUpdater: if self.enabled: # if we start a new background update, not all updates are done. self._all_done = False - run_as_background_process("background_updates", self.run_background_updates) + sleep = self.sleep_enabled + run_as_background_process( + "background_updates", self.run_background_updates, sleep + ) - async def run_background_updates(self, sleep: bool = True) -> None: + async def run_background_updates(self, sleep: bool) -> None: if self._running or not self.enabled: return diff --git a/tests/config/test_background_update.py b/tests/config/test_background_update.py new file mode 100644 index 0000000000..0c32c1ca29 --- /dev/null +++ b/tests/config/test_background_update.py @@ -0,0 +1,58 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import yaml + +from synapse.storage.background_updates import BackgroundUpdater + +from tests.unittest import HomeserverTestCase, override_config + + +class BackgroundUpdateConfigTestCase(HomeserverTestCase): + # Tests that the default values in the config are correctly loaded. Note that the default + # values are loaded when the corresponding config options are commented out, which is why there isn't + # a config specified here. 
+ def test_default_configuration(self): + background_updater = BackgroundUpdater( + self.hs, self.hs.get_datastores().main.db_pool + ) + + self.assertEqual(background_updater.minimum_background_batch_size, 1) + self.assertEqual(background_updater.default_background_batch_size, 100) + self.assertEqual(background_updater.sleep_enabled, True) + self.assertEqual(background_updater.sleep_duration_ms, 1000) + self.assertEqual(background_updater.update_duration_ms, 100) + + # Tests that non-default values for the config options are properly picked up and passed on. + @override_config( + yaml.safe_load( + """ + background_updates: + background_update_duration_ms: 1000 + sleep_enabled: false + sleep_duration_ms: 600 + min_batch_size: 5 + default_batch_size: 50 + """ + ) + ) + def test_custom_configuration(self): + background_updater = BackgroundUpdater( + self.hs, self.hs.get_datastores().main.db_pool + ) + + self.assertEqual(background_updater.minimum_background_batch_size, 5) + self.assertEqual(background_updater.default_background_batch_size, 50) + self.assertEqual(background_updater.sleep_enabled, False) + self.assertEqual(background_updater.sleep_duration_ms, 600) + self.assertEqual(background_updater.update_duration_ms, 1000) diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index becec84524..6cf56b1e35 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -39,6 +39,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") + self.updater = BackgroundUpdater(hs, self.store.db_pool) @parameterized.expand( [ @@ -135,10 +136,10 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): """Test the status API works with a background update.""" # Create a new background update - self._register_bg_update() self.store.db_pool.updates.start_doing_background_updates() + self.reactor.pump([1.0, 1.0, 1.0]) channel = self.make_request( @@ -158,7 +159,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, @@ -213,7 +214,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, @@ -242,7 +243,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase): "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 9fdf54ea31..5cf18b690e 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -14,12 +14,15 @@ from unittest.mock import Mock +import yaml + from twisted.internet.defer import Deferred, ensureDeferred from synapse.storage.background_updates import BackgroundUpdater from tests import unittest from tests.test_utils import make_awaitable, simple_async_mock +from tests.unittest import override_config class BackgroundUpdateTestCase(unittest.HomeserverTestCase): @@ -34,6 +37,19 @@ 
class BackgroundUpdateTestCase(unittest.HomeserverTestCase): self.updates.register_background_update_handler( "test_update", self.update_handler ) + self.store = self.hs.get_datastores().main + + async def update(self, progress, count): + duration_ms = 10 + await self.clock.sleep((count * duration_ms) / 1000) + progress = {"my_key": progress["my_key"] + 1} + await self.store.db_pool.runInteraction( + "update_progress", + self.updates._background_update_progress_txn, + "test_update", + progress, + ) + return count def test_do_background_update(self): # the time we claim it takes to update one item when running the update @@ -42,27 +58,14 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # the target runtime for each bg update target_background_update_duration_ms = 100 - store = self.hs.get_datastores().main self.get_success( - store.db_pool.simple_insert( + self.store.db_pool.simple_insert( "background_updates", values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, ) ) - # first step: make a bit of progress - async def update(progress, count): - await self.clock.sleep((count * duration_ms) / 1000) - progress = {"my_key": progress["my_key"] + 1} - await store.db_pool.runInteraction( - "update_progress", - self.updates._background_update_progress_txn, - "test_update", - progress, - ) - return count - - self.update_handler.side_effect = update + self.update_handler.side_effect = self.update self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), @@ -72,7 +75,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # on the first call, we should get run with the default background update size self.update_handler.assert_called_once_with( - {"my_key": 1}, self.updates.DEFAULT_BACKGROUND_BATCH_SIZE + {"my_key": 1}, self.updates.default_background_batch_size ) # second step: complete the update @@ -99,6 +102,224 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): self.assertTrue(result) self.assertFalse(self.update_handler.called) + @override_config( + yaml.safe_load( + """ + background_updates: + default_batch_size: 20 + """ + ) + ) + def test_background_update_default_batch_set_by_config(self): + """ + Test that the background update is run with the default_batch_size set by the config + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=0.01, + ) + self.assertFalse(res) + + # on the first call, we should get run with the default background update size specified in the config + self.update_handler.assert_called_once_with({"my_key": 1}, 20) + + def test_background_update_default_sleep_behavior(self): + """ + Test default background update behavior, which is to sleep + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor less than the default sleep duration (1000ms) + self.reactor.pump([0.5]) + # check that an update has not been run + self.update_handler.assert_not_called() + + # advance reactor past default sleep duration + 
self.reactor.pump([1]) + # check that update has been run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + sleep_duration_ms: 500 + """ + ) + ) + def test_background_update_sleep_set_in_config(self): + """ + Test that changing the sleep time in the config changes how long it sleeps + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor less than the configured sleep duration (500ms) + self.reactor.pump([0.45]) + # check that an update has not been run + self.update_handler.assert_not_called() + + # advance reactor past config sleep duration but less than default duration + self.reactor.pump([0.75]) + # check that update has been run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + sleep_enabled: false + """ + ) + ) + def test_disabling_background_update_sleep(self): + """ + Test that disabling sleep in the config results in bg update not sleeping + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor very little + self.reactor.pump([0.025]) + # check that an update has run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + background_update_duration_ms: 500 + """ + ) + ) + def test_background_update_duration_set_in_config(self): + """ + Test that the desired duration set in the config is used in determining batch size + """ + # Duration of one background update item + duration_ms = 10 + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=0.02, + ) + self.assertFalse(res) + + # the first update was run with the default batch size, this should be run with 500ms as the + # desired duration + async def update(progress, count): + self.assertEqual(progress, {"my_key": 2}) + self.assertAlmostEqual( + count, + 500 / duration_ms, + places=0, + ) + await self.updates._end_background_update("test_update") + return count + + self.update_handler.side_effect = update + self.get_success(self.updates.do_next_background_update(False)) + + @override_config( + yaml.safe_load( + """ + background_updates: + min_batch_size: 5 + """ + ) + ) + def test_background_update_min_batch_set_in_config(self): + """ + Test that the minimum batch size set in the config is used + """ + # a very long-running individual update + duration_ms = 50 + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + # Run the update with the long-running update item + async def update(progress, count): + await self.clock.sleep((count * duration_ms) / 1000) + progress = {"my_key": progress["my_key"] + 1} + await 
self.store.db_pool.runInteraction( + "update_progress", + self.updates._background_update_progress_txn, + "test_update", + progress, + ) + return count + + self.update_handler.side_effect = update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=1, + ) + self.assertFalse(res) + + # the first update was run with the default batch size, this should be run with minimum batch size + # as the first items took a very long time + async def update(progress, count): + self.assertEqual(progress, {"my_key": 2}) + self.assertEqual(count, 5) + await self.updates._end_background_update("test_update") + return count + + self.update_handler.side_effect = update + self.get_success(self.updates.do_next_background_update(False)) + class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): -- cgit 1.5.1 From 54f674f7a9107d3dccd6c126c3e99337314a12c2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Sat, 12 Mar 2022 13:23:37 -0500 Subject: Deprecate the groups/communities endpoints and add an experimental configuration flag. (#12200) --- changelog.d/12200.removal | 1 + docs/upgrade.md | 14 ++++++++++++++ synapse/app/generic_worker.py | 3 ++- synapse/config/experimental.py | 3 +++ synapse/federation/transport/server/__init__.py | 15 +++++++++++---- synapse/rest/__init__.py | 3 ++- synapse/rest/admin/__init__.py | 3 ++- 7 files changed, 35 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12200.removal diff --git a/changelog.d/12200.removal b/changelog.d/12200.removal new file mode 100644 index 0000000000..312c7ae325 --- /dev/null +++ b/changelog.d/12200.removal @@ -0,0 +1 @@ +The groups/communities feature in Synapse has been deprecated. diff --git a/docs/upgrade.md b/docs/upgrade.md index 95005962dc..f9ac605e7b 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,20 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.56.0 + +## Groups/communities feature has been deprecated + +The non-standard groups/communities feature in Synapse has been deprecated and will +be disabled by default in Synapse v1.58.0. + +You can test disabling it by adding the following to your homeserver configuration: + +```yaml +experimental_features: + groups_enabled: false +``` + # Upgrading to v1.55.0 ## `synctl` script has been moved diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a10a63b06c..b6f510ed30 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -322,7 +322,8 @@ class GenericWorkerServer(HomeServer): presence.register_servlets(self, resource) - groups.register_servlets(self, resource) + if self.config.experimental.groups_enabled: + groups.register_servlets(self, resource) resources.update({CLIENT_API_PREFIX: resource}) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 41338b39df..064db4487c 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -74,3 +74,6 @@ class ExperimentalConfig(Config): # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) + + # The deprecated groups feature. 
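The flag is defined just below and defaults to on; the federation transport change later in this commit keys servlet registration off it. A condensed sketch of that selection logic (servlet names abbreviated and hypothetical):

```python
from typing import Dict, Iterable, Tuple

SERVLET_GROUPS: Dict[str, Tuple[str, ...]] = {
    "federation": ("FederationSendServlet",),
    "openid": ("OpenIdUserInfo",),
    "group_server": ("GroupServerProfileServlet",),
}
DEFAULT_SERVLET_GROUPS: Tuple[str, ...] = ("federation", "openid")
GROUP_SERVLET_GROUPS: Tuple[str, ...] = ("group_server",)


def resolve_servlet_groups(
    groups_enabled: bool, requested: Iterable[str] = ()
) -> Tuple[str, ...]:
    groups = tuple(requested)
    if not groups:
        groups = DEFAULT_SERVLET_GROUPS
        # Only add the deprecated groups servlets when the flag is on.
        if groups_enabled:
            groups += GROUP_SERVLET_GROUPS
    for group in groups:
        if group not in SERVLET_GROUPS:
            raise RuntimeError(f"Unknown federation servlet group: {group!r}")
    return groups


assert resolve_servlet_groups(True) == ("federation", "openid", "group_server")
assert resolve_servlet_groups(False) == ("federation", "openid")
assert resolve_servlet_groups(False, ["openid"]) == ("openid",)
```

As in the diff below, an explicitly requested group list bypasses the flag check; only the default set is filtered.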
+ self.groups_enabled: bool = experimental.get("groups_enabled", True) diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 67a6347907..71b2f90eb9 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -289,7 +289,7 @@ class OpenIdUserInfo(BaseFederationServlet): return 200, {"sub": user_id} -DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { +SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), "group_server": GROUP_SERVER_SERVLET_CLASSES, @@ -298,6 +298,10 @@ DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "openid": (OpenIdUserInfo,), } +DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid") + +GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation") + def register_servlets( hs: "HomeServer", @@ -320,16 +324,19 @@ def register_servlets( Defaults to ``DEFAULT_SERVLET_GROUPS``. """ if not servlet_groups: - servlet_groups = DEFAULT_SERVLET_GROUPS.keys() + servlet_groups = DEFAULT_SERVLET_GROUPS + # Only allow the groups servlets if the deprecated groups feature is enabled. + if hs.config.experimental.groups_enabled: + servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS for servlet_group in servlet_groups: # Skip unknown servlet groups. - if servlet_group not in DEFAULT_SERVLET_GROUPS: + if servlet_group not in SERVLET_GROUPS: raise RuntimeError( f"Attempting to register unknown federation servlet: '{servlet_group}'" ) - for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]: + for servletclass in SERVLET_GROUPS[servlet_group]: # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled if ( servletclass == FederationTimestampLookupServlet diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index cebdeecb81..762808a571 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -118,7 +118,8 @@ class ClientRestResource(JsonResource): thirdparty.register_servlets(hs, client_resource) sendtodevice.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource) - groups.register_servlets(hs, client_resource) + if hs.config.experimental.groups_enabled: + groups.register_servlets(hs, client_resource) room_upgrade_rest_servlet.register_servlets(hs, client_resource) room_batch.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 6de302f813..cb4d55c89d 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -293,7 +293,8 @@ def register_servlets_for_client_rest_resource( ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) UserRegisterServlet(hs).register(http_server) - DeleteGroupAdminRestServlet(hs).register(http_server) + if hs.config.experimental.groups_enabled: + DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) # Load the media repo ones if we're using them. 
Otherwise load the servlets which -- cgit 1.5.1 From 90b2327066d2343faa86c464a182b6f3c4422ecd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:15 +0000 Subject: Add `delay_cancellation` utility function (#12180) `delay_cancellation` behaves like `stop_cancellation`, except it delays `CancelledError`s until the original `Deferred` resolves. This is handy for unifying cleanup paths and ensuring that uncancelled coroutines don't use finished logcontexts. Signed-off-by: Sean Quah --- changelog.d/12180.misc | 1 + synapse/util/async_helpers.py | 48 +++++++++++++-- tests/util/test_async_helpers.py | 124 +++++++++++++++++++++++++++++++++++++-- 3 files changed, 161 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12180.misc diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc new file mode 100644 index 0000000000..7a347352fd --- /dev/null +++ b/changelog.d/12180.misc @@ -0,0 +1 @@ +Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index a9f67dcbac..69c8c1baa9 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -686,12 +686,48 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": Synapse logcontext rules. Returns: - A new `Deferred`, which will contain the result of the original `Deferred`, - but will not propagate cancellation through to the original. When cancelled, - the new `Deferred` will fail with a `CancelledError` and will not follow the - Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap - the new `Deferred`. + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will fail with a `CancelledError`. + + The new `Deferred` will not follow the Synapse logcontext rules and should be + wrapped with `make_deferred_yieldable`. + """ + new_deferred: "defer.Deferred[T]" = defer.Deferred() + deferred.chainDeferred(new_deferred) + return new_deferred + + +def delay_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": + """Delay cancellation of a `Deferred` until it resolves. + + Has the same effect as `stop_cancellation`, but the returned `Deferred` will not + resolve with a `CancelledError` until the original `Deferred` resolves. + + Args: + deferred: The `Deferred` to protect against cancellation. May optionally follow + the Synapse logcontext rules. + + Returns: + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will wait until the original `Deferred` + resolves before failing with a `CancelledError`. + + The new `Deferred` will follow the Synapse logcontext rules if `deferred` + follows the Synapse logcontext rules. Otherwise the new `Deferred` should be + wrapped with `make_deferred_yieldable`. """ - new_deferred: defer.Deferred[T] = defer.Deferred() + + def handle_cancel(new_deferred: "defer.Deferred[T]") -> None: + # before the new deferred is cancelled, we `pause` it to stop the cancellation + # propagating. we then `unpause` it once the wrapped deferred completes, to + # propagate the exception. 
+ new_deferred.pause() + new_deferred.errback(Failure(CancelledError())) + + deferred.addBoth(lambda _: new_deferred.unpause()) + + new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel) deferred.chainDeferred(new_deferred) return new_deferred diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index ff53ce114b..e5bc416de1 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -13,6 +13,8 @@ # limitations under the License. import traceback +from parameterized import parameterized_class + from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.internet.task import Clock @@ -23,10 +25,12 @@ from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, current_context, + make_deferred_yieldable, ) from synapse.util.async_helpers import ( ObservableDeferred, concurrently_execute, + delay_cancellation, stop_cancellation, timeout_deferred, ) @@ -313,13 +317,27 @@ class ConcurrentlyExecuteTest(TestCase): self.successResultOf(d2) -class StopCancellationTests(TestCase): - """Tests for the `stop_cancellation` function.""" +@parameterized_class( + ("wrapper",), + [("stop_cancellation",), ("delay_cancellation",)], +) +class CancellationWrapperTests(TestCase): + """Common tests for the `stop_cancellation` and `delay_cancellation` functions.""" + + wrapper: str + + def wrap_deferred(self, deferred: "Deferred[str]") -> "Deferred[str]": + if self.wrapper == "stop_cancellation": + return stop_cancellation(deferred) + elif self.wrapper == "delay_cancellation": + return delay_cancellation(deferred) + else: + raise ValueError(f"Unsupported wrapper type: {self.wrapper}") def test_succeed(self): """Test that the new `Deferred` receives the result.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Success should propagate through. deferred.callback("success") @@ -329,7 +347,7 @@ class StopCancellationTests(TestCase): def test_failure(self): """Test that the new `Deferred` receives the `Failure`.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Failure should propagate through. deferred.errback(ValueError("abc")) @@ -337,6 +355,10 @@ class StopCancellationTests(TestCase): self.failureResultOf(wrapper_deferred, ValueError) self.assertIsNone(deferred.result, "`Failure` was not consumed") + +class StopCancellationTests(TestCase): + """Tests for the `stop_cancellation` function.""" + def test_cancellation(self): """Test that cancellation of the new `Deferred` leaves the original running.""" deferred: "Deferred[str]" = Deferred() @@ -347,11 +369,101 @@ class StopCancellationTests(TestCase): self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( - deferred.called, "Original `Deferred` was unexpectedly cancelled." + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. 
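The pause/unpause dance above is subtle, so a usage sketch may help. It assumes a Synapse checkout with this commit is importable; the assertions spell out the intended semantics:

```python
from twisted.internet import defer
from twisted.internet.defer import CancelledError

from synapse.util.async_helpers import delay_cancellation

original = defer.Deferred()
wrapper = delay_cancellation(original)

failures = []
wrapper.addErrback(lambda f: failures.append(f.check(CancelledError)))

wrapper.cancel()
assert not failures          # the CancelledError is held back...
assert not original.called   # ...and the original keeps running

original.callback("done")    # once the original resolves...
assert failures == [CancelledError]  # ...the wrapper fails with CancelledError
```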
+ deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + +class DelayCancellationTests(TestCase): + """Tests for the `delay_cancellation` function.""" + + def test_cancellation(self): + """Test that cancellation of the new `Deferred` waits for the original.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`. + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. + deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_suppresses_second_cancellation(self): + """Test that a second cancellation is suppressed. + + Identical to `test_cancellation` except the new `Deferred` is cancelled twice. + """ + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`, twice. + wrapper_deferred.cancel() + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" ) - # Now make the inner `Deferred` fail. + # Now make the original `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_propagates_cancelled_error(self): + """Test that a `CancelledError` from the original `Deferred` gets propagated.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Fail the original `Deferred` with a `CancelledError`. + cancelled_error = CancelledError() + deferred.errback(cancelled_error) + + # The new `Deferred` should fail with exactly the same `CancelledError`. + self.assertTrue(wrapper_deferred.called) + self.assertIs(cancelled_error, self.failureResultOf(wrapper_deferred).value) + + def test_preserves_logcontext(self): + """Test that logging contexts are preserved.""" + blocking_d: "Deferred[None]" = Deferred() + + async def inner(): + await make_deferred_yieldable(blocking_d) + + async def outer(): + with LoggingContext("c") as c: + try: + await delay_cancellation(defer.ensureDeferred(inner())) + self.fail("`CancelledError` was not raised") + except CancelledError: + self.assertEqual(c, current_context()) + # Succeed with no error, unless the logging context is wrong. + + # Run and block inside `inner()`. + d = defer.ensureDeferred(outer()) + self.assertEqual(SENTINEL_CONTEXT, current_context()) + + d.cancel() + + # Now unblock. `outer()` will consume the `CancelledError` and check the + # logging context. 
+ blocking_d.callback(None) + self.successResultOf(d) -- cgit 1.5.1 From 8e5706d14448c0fe8d1c55eaca38a672c701d7a9 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:58 +0000 Subject: Fix broken background updates when using sqlite with `enable_search` off (#12215) Signed-off-by: Sean Quah --- changelog.d/12215.bugfix | 1 + synapse/storage/databases/main/search.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12215.bugfix diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix new file mode 100644 index 0000000000..593b12556b --- /dev/null +++ b/changelog.d/12215.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index e23b119072..c5e9010c83 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -125,9 +125,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): ): super().__init__(database, db_conn, hs) - if not hs.config.server.enable_search: - return - self.db_pool.updates.register_background_update_handler( self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search ) @@ -243,9 +240,13 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): return len(event_search_rows) - result = await self.db_pool.runInteraction( - self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn - ) + if self.hs.config.server.enable_search: + result = await self.db_pool.runInteraction( + self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn + ) + else: + # Don't index anything if search is not enabled. + result = 0 if not result: await self.db_pool.updates._end_background_update( -- cgit 1.5.1 From 605d161d7d585847fd1bb98d14d5281daeac8e86 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 18:49:07 +0000 Subject: Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah --- changelog.d/12120.misc | 1 + synapse/handlers/pagination.py | 8 +- synapse/util/async_helpers.py | 71 ++++---- tests/util/test_rwlock.py | 395 +++++++++++++++++++++++++++++++++++------ 4 files changed, 382 insertions(+), 93 deletions(-) create mode 100644 changelog.d/12120.misc diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc new file mode 100644 index 0000000000..3603096500 --- /dev/null +++ b/changelog.d/12120.misc @@ -0,0 +1 @@ +Add support for cancellation to `ReadWriteLock`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 183fabcfc0..60059fec3e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -350,7 +350,7 @@ class PaginationHandler: """ self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): await self.storage.purge_events.purge_history( room_id, token, delete_local_events ) @@ -406,7 +406,7 @@ class PaginationHandler: room_id: room to be purged force: set true to skip checking for joined users. 
""" - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): # first check that we have no users in this room if not force: joined = await self.store.is_host_joined(room_id, self._server_name) @@ -448,7 +448,7 @@ class PaginationHandler: room_token = from_token.room_key - with await self.pagination_lock.read(room_id): + async with self.pagination_lock.read(room_id): ( membership, member_event_id, @@ -615,7 +615,7 @@ class PaginationHandler: self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN self._delete_by_id[ delete_id diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 69c8c1baa9..6a8e844d63 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -18,9 +18,10 @@ import collections import inspect import itertools import logging -from contextlib import contextmanager +from contextlib import asynccontextmanager, contextmanager from typing import ( Any, + AsyncIterator, Awaitable, Callable, Collection, @@ -40,7 +41,7 @@ from typing import ( ) import attr -from typing_extensions import ContextManager, Literal +from typing_extensions import AsyncContextManager, Literal from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -491,7 +492,7 @@ class ReadWriteLock: Example: - with await read_write_lock.read("test_key"): + async with read_write_lock.read("test_key"): # do some work """ @@ -514,22 +515,24 @@ class ReadWriteLock: # Latest writer queued self.key_to_current_writer: Dict[str, defer.Deferred] = {} - async def read(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def read(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.setdefault(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.setdefault(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - curr_readers.add(new_defer) + curr_readers.add(new_defer) - # We wait for the latest writer to finish writing. We can safely ignore - # any existing readers... as they're readers. - if curr_writer: - await make_deferred_yieldable(curr_writer) - - @contextmanager - def _ctx_manager() -> Iterator[None]: try: + # We wait for the latest writer to finish writing. We can safely ignore + # any existing readers... as they're readers. + # May raise a `CancelledError` if the `Deferred` wrapping us is + # cancelled. The `Deferred` we are waiting on must not be cancelled, + # since we do not own it. 
+ if curr_writer: + await make_deferred_yieldable(stop_cancellation(curr_writer)) yield finally: with PreserveLoggingContext(): @@ -538,29 +541,35 @@ class ReadWriteLock: return _ctx_manager() - async def write(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def write(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.get(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.get(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - # We wait on all latest readers and writer. - to_wait_on = list(curr_readers) - if curr_writer: - to_wait_on.append(curr_writer) + # We wait on all latest readers and writer. + to_wait_on = list(curr_readers) + if curr_writer: + to_wait_on.append(curr_writer) - # We can clear the list of current readers since the new writer waits - # for them to finish. - curr_readers.clear() - self.key_to_current_writer[key] = new_defer + # We can clear the list of current readers since `new_defer` waits + # for them to finish. + curr_readers.clear() + self.key_to_current_writer[key] = new_defer - await make_deferred_yieldable(defer.gatherResults(to_wait_on)) - - @contextmanager - def _ctx_manager() -> Iterator[None]: + to_wait_on_defer = defer.gatherResults(to_wait_on) try: + # Wait for all current readers and the latest writer to finish. + # May raise a `CancelledError` immediately after the wait if the + # `Deferred` wrapping us is cancelled. We must only release the lock + # once we have acquired it, hence the use of `delay_cancellation` + # rather than `stop_cancellation`. + await make_deferred_yieldable(delay_cancellation(to_wait_on_defer)) yield finally: + # Release the lock. with PreserveLoggingContext(): new_defer.callback(None) # `self.key_to_current_writer[key]` may be missing if there was another diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index 0774625b85..0c84226197 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import AsyncContextManager, Callable, Sequence, Tuple + from twisted.internet import defer -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.util.async_helpers import ReadWriteLock @@ -21,87 +23,187 @@ from tests import unittest class ReadWriteLockTestCase(unittest.TestCase): - def _assert_called_before_not_after(self, lst, first_false): - for i, d in enumerate(lst[:first_false]): - self.assertTrue(d.called, msg="%d was unexpectedly false" % i) + def _start_reader_or_writer( + self, + read_or_write: Callable[[str], AsyncContextManager], + key: str, + return_value: str, + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader or writer which acquires the lock, blocks, then completes. + + Args: + read_or_write: A function returning a context manager for a lock. + Either a bound `ReadWriteLock.read` or `ReadWriteLock.write`. + key: The key to read or write. + return_value: A string that the reader or writer will resolve with when + done. + + Returns: + A tuple of three `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader or writer + completes successfully. 
+ * A `Deferred` that resolves once the reader or writer acquires the lock. + * A `Deferred` that blocks the reader or writer. Must be resolved by the + caller to allow the reader or writer to release the lock and complete. + """ + acquired_d: "Deferred[None]" = Deferred() + unblock_d: "Deferred[None]" = Deferred() + + async def reader_or_writer(): + async with read_or_write(key): + acquired_d.callback(None) + await unblock_d + return return_value + + d = defer.ensureDeferred(reader_or_writer()) + return d, acquired_d, unblock_d + + def _start_blocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.read, key, return_value) + + def _start_blocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a writer which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.write, key, return_value) + + def _start_nonblocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a reader which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader completes + successfully. + * A `Deferred` that resolves once the reader acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.read, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _start_nonblocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a writer which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the writer completes + successfully. + * A `Deferred` that resolves once the writer acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.write, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _assert_first_n_resolved( + self, deferreds: Sequence["defer.Deferred[None]"], n: int + ) -> None: + """Assert that exactly the first n `Deferred`s in the given list are resolved. - for i, d in enumerate(lst[first_false:]): + Args: + deferreds: The list of `Deferred`s to be checked. + n: The number of `Deferred`s at the start of `deferreds` that should be + resolved. 
+ """ + for i, d in enumerate(deferreds[:n]): + self.assertTrue(d.called, msg="deferred %d was unexpectedly unresolved" % i) + + for i, d in enumerate(deferreds[n:]): self.assertFalse( - d.called, msg="%d was unexpectedly true" % (i + first_false) + d.called, msg="deferred %d was unexpectedly resolved" % (i + n) ) def test_rwlock(self): rwlock = ReadWriteLock() - - key = object() + key = "key" ds = [ - rwlock.read(key), # 0 - rwlock.read(key), # 1 - rwlock.write(key), # 2 - rwlock.write(key), # 3 - rwlock.read(key), # 4 - rwlock.read(key), # 5 - rwlock.write(key), # 6 + self._start_blocking_reader(rwlock, key, "0"), + self._start_blocking_reader(rwlock, key, "1"), + self._start_blocking_writer(rwlock, key, "2"), + self._start_blocking_writer(rwlock, key, "3"), + self._start_blocking_reader(rwlock, key, "4"), + self._start_blocking_reader(rwlock, key, "5"), + self._start_blocking_writer(rwlock, key, "6"), ] - ds = [defer.ensureDeferred(d) for d in ds] + # `Deferred`s that resolve when each reader or writer acquires the lock. + acquired_ds = [acquired_d for _, acquired_d, _ in ds] + # `Deferred`s that will trigger the release of locks when resolved. + release_ds = [release_d for _, _, release_d in ds] - self._assert_called_before_not_after(ds, 2) + # The first two readers should acquire their locks. + self._assert_first_n_resolved(acquired_ds, 2) - with ds[0].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 2) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[0].callback(None) + self._assert_first_n_resolved(acquired_ds, 2) - with ds[1].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 3) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[1].callback(None) + self._assert_first_n_resolved(acquired_ds, 3) - with ds[2].result: - self._assert_called_before_not_after(ds, 3) - self._assert_called_before_not_after(ds, 4) + # Release the write lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 3) + release_ds[2].callback(None) + self._assert_first_n_resolved(acquired_ds, 4) - with ds[3].result: - self._assert_called_before_not_after(ds, 4) - self._assert_called_before_not_after(ds, 6) + # Release the write lock. The next two readers should acquire locks. + self._assert_first_n_resolved(acquired_ds, 4) + release_ds[3].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[5].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 6) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[5].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[4].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 7) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[4].callback(None) + self._assert_first_n_resolved(acquired_ds, 7) - with ds[6].result: - pass + # Release the write lock. 
+ release_ds[6].callback(None) - d = defer.ensureDeferred(rwlock.write(key)) - self.assertTrue(d.called) - with d.result: - pass + # Acquire and release the write and read locks one last time for good measure. + _, acquired_d = self._start_nonblocking_writer(rwlock, key, "last writer") + self.assertTrue(acquired_d.called) - d = defer.ensureDeferred(rwlock.read(key)) - self.assertTrue(d.called) - with d.result: - pass + _, acquired_d = self._start_nonblocking_reader(rwlock, key, "last reader") + self.assertTrue(acquired_d.called) def test_lock_handoff_to_nonblocking_writer(self): """Test a writer handing the lock to another writer that completes instantly.""" rwlock = ReadWriteLock() key = "key" - unblock: "Deferred[None]" = Deferred() - - async def blocking_write(): - with await rwlock.write(key): - await unblock - - async def nonblocking_write(): - with await rwlock.write(key): - pass - - d1 = defer.ensureDeferred(blocking_write()) - d2 = defer.ensureDeferred(nonblocking_write()) + d1, _, unblock = self._start_blocking_writer(rwlock, key, "write 1 completed") + d2, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") self.assertFalse(d1.called) self.assertFalse(d2.called) @@ -111,5 +213,182 @@ class ReadWriteLockTestCase(unittest.TestCase): self.assertTrue(d2.called) # The `ReadWriteLock` should operate as normal. - d3 = defer.ensureDeferred(nonblocking_write()) + d3, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") self.assertTrue(d3.called) + + def test_cancellation_while_holding_read_lock(self): + """Test cancellation while holding a read lock. + + A waiting writer should be given the lock when the reader holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed") + + # 2. A writer waits for the reader to complete. + writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed") + self.assertFalse(writer_d.called) + + # 3. The reader is cancelled. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + + # 4. The writer should take the lock and complete. + self.assertTrue( + writer_d.called, "Writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write completed", self.successResultOf(writer_d)) + + def test_cancellation_while_holding_write_lock(self): + """Test cancellation while holding a write lock. + + A waiting reader should be given the lock when the writer holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer_d, _, _ = self._start_blocking_writer(rwlock, key, "write completed") + + # 2. A reader waits for the writer to complete. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. The writer is cancelled. + writer_d.cancel() + self.failureResultOf(writer_d, CancelledError) + + # 4. The reader should take the lock and complete. + self.assertTrue( + reader_d.called, "Reader is stuck waiting for a cancelled writer" + ) + self.assertEqual("read completed", self.successResultOf(reader_d)) + + def test_cancellation_while_waiting_for_read_lock(self): + """Test cancellation while waiting for a read lock. 
+ + Tests that cancelling a waiting reader: + * does not cancel the writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier writer + has finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 2. A reader waits for the first writer to complete. + # This reader will be cancelled later. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. A second writer waits for both the first writer and the reader to complete. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. The waiting reader is cancelled. + # Neither of the writers should be cancelled. + # The second writer should still be waiting, but only on the first writer. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer2_d.called, + "Second writer was unexpectedly cancelled or given the lock before the " + "first writer finished", + ) + + # 5. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 6. The second writer should take the lock and complete. + self.assertTrue( + writer2_d.called, "Second writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write 2 completed", self.successResultOf(writer2_d)) + + def test_cancellation_while_waiting_for_write_lock(self): + """Test cancellation while waiting for a write lock. + + Tests that cancelling a waiting writer: + * does not cancel the reader or writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier reader + and writer have finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, unblock_reader = self._start_blocking_reader( + rwlock, key, "read completed" + ) + + # 2. A writer waits for the reader to complete. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 3. A second writer waits for both the reader and first writer to complete. + # This writer will be cancelled later. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. A third writer waits for the second writer to complete. + writer3_d, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") + self.assertFalse(writer3_d.called) + + # 5. The second writer is cancelled, but continues waiting for the lock. + # The reader, first writer and third writer should not be cancelled. + # The first writer should still be waiting on the reader. + # The third writer should still be waiting on the second writer. 
+ writer2_d.cancel() + self.assertNoResult(writer2_d) + self.assertFalse(reader_d.called, "Reader was unexpectedly cancelled") + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly cancelled or given the lock before the first " + "writer finished", + ) + + # 6. Unblock the reader, which should complete. + # The first writer should be given the lock and block. + # The third writer should still be waiting on the second writer. + unblock_reader.callback(None) + self.assertEqual("read completed", self.successResultOf(reader_d)) + self.assertNoResult(writer2_d) + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly given the lock before the first writer " + "finished", + ) + + # 7. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 8. The second writer should take the lock and release it immediately, since it + # has been cancelled. + self.failureResultOf(writer2_d, CancelledError) + + # 9. The third writer should take the lock and complete. + self.assertTrue( + writer3_d.called, "Third writer is stuck waiting for a cancelled writer" + ) + self.assertEqual("write 3 completed", self.successResultOf(writer3_d)) -- cgit 1.5.1 From 2fcf4b3f6cd2a0be6597622664636d2219957c2a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 19:04:29 +0000 Subject: Add cancellation support to `@cached` and `@cachedList` decorators (#12183) These decorators mostly support cancellation already. Add cancellation tests and fix use of finished logging contexts by delaying cancellation, as suggested by @erikjohnston. Signed-off-by: Sean Quah --- changelog.d/12183.misc | 1 + synapse/util/caches/descriptors.py | 11 +++ tests/util/caches/test_descriptors.py | 147 +++++++++++++++++++++++++++++++++- 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12183.misc diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc new file mode 100644 index 0000000000..dd441bb64f --- /dev/null +++ b/changelog.d/12183.misc @@ -0,0 +1 @@ +Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index c3c5c16db9..eda92d864d 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -41,6 +41,7 @@ from twisted.python.failure import Failure from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError +from synapse.util.async_helpers import delay_cancellation from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.lrucache import LruCache @@ -350,6 +351,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) ret = cache.set(cache_key, ret, callback=invalidate_callback) + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. 
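# --- Editor's sketch, not part of this patch: what `delay_cancellation`
# is assumed to guarantee here, built from plain Twisted primitives.
# Cancelling the wrapper only *records* the cancellation; the
# `CancelledError` reaches the caller once the wrapped `Deferred` has
# resolved, so the lookup (and the logging context it is using) runs to
# completion in the background.
from twisted.internet import defer
from twisted.internet.defer import CancelledError

def delay_cancellation_sketch(inner: "defer.Deferred") -> "defer.Deferred":
    cancelled = False

    def canceller(outer_d: "defer.Deferred") -> None:
        nonlocal cancelled
        cancelled = True
        # Queue a `CancelledError` on the wrapper but pause its delivery
        # until `inner` resolves. Firing the Deferred inside the canceller
        # stops Twisted from errbacking it immediately on our behalf.
        outer_d.pause()
        outer_d.errback(CancelledError())

    outer: "defer.Deferred" = defer.Deferred(canceller)

    def relay(result: object) -> None:
        if cancelled:
            outer.unpause()  # deliver the queued CancelledError now
        else:
            outer.callback(result)  # a Failure propagates as an errback
        return None  # consume `inner`'s result either way

    inner.addBoth(relay)
    return outer
# --- end sketch ---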
+ ret = delay_cancellation(ret) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -510,6 +516,11 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( lambda _: results, unwrapFirstError ) + if missing: + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. + d = delay_cancellation(d) return make_deferred_yieldable(d) else: return defer.succeed(results) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 6a4b17527a..48e616ac74 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -17,7 +17,7 @@ from typing import Set from unittest import mock from twisted.internet import defer, reactor -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.api.errors import SynapseError from synapse.logging.context import ( @@ -28,7 +28,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, lru_cache +from synapse.util.caches.descriptors import cached, cachedList, lru_cache from tests import unittest from tests.test_utils import get_awaitable_result @@ -493,6 +493,74 @@ class DescriptorTestCase(unittest.TestCase): obj.invalidate() top_invalidate.assert_called_once() + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + async def fn(self, arg1): + await complete_lookup + return str(arg1) + + obj = Cls() + + d1 = obj.fn(123) + d2 = obj.fn(123) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + # Cancel `d1`, which is the lookup that caused `fn` to run. + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, "123") + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + async def fn(self, arg1): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return str(arg1) + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.fn(123) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + class CacheDecoratorTestCase(unittest.HomeserverTestCase): """More tests for @cached @@ -865,3 +933,78 @@ class CachedListDescriptorTestCase(unittest.TestCase): obj.fn.invalidate((10, 2)) invalidate0.assert_called_once() invalidate1.assert_called_once() + + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await complete_lookup + return {arg: str(arg) for arg in args} + + obj = Cls() + + d1 = obj.list_fn([123, 456]) + d2 = obj.list_fn([123, 456, 789]) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"}) + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return {arg: str(arg) for arg in args} + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.list_fn([123]) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) -- cgit 1.5.1 From d1130a249b4f462a3e457b783b483d5a6c7486f0 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:00:01 +0000 Subject: 1.55.0rc1 --- CHANGES.md | 82 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/11700.removal | 1 - changelog.d/11915.misc | 1 - changelog.d/11980.misc | 1 - changelog.d/11998.doc | 1 - changelog.d/12028.feature | 1 - changelog.d/12042.misc | 1 - changelog.d/12090.bugfix | 1 - changelog.d/12101.misc | 1 - changelog.d/12108.misc | 1 - changelog.d/12113.bugfix | 1 - changelog.d/12118.misc | 1 - changelog.d/12120.misc | 1 - changelog.d/12121.bugfix | 1 - changelog.d/12128.misc | 1 - changelog.d/12130.bugfix | 1 - changelog.d/12131.misc | 1 - changelog.d/12132.feature | 1 - changelog.d/12135.feature | 1 - changelog.d/12136.misc | 1 - changelog.d/12137.misc | 1 - changelog.d/12138.removal | 1 - changelog.d/12140.misc | 1 - changelog.d/12142.misc | 1 - changelog.d/12143.doc | 1 - changelog.d/12144.misc | 1 - changelog.d/12145.misc | 1 - changelog.d/12146.misc | 1 - changelog.d/12149.misc | 1 - changelog.d/12150.misc | 1 - changelog.d/12151.feature | 1 - changelog.d/12152.misc | 1 - changelog.d/12153.misc | 1 - changelog.d/12154.misc | 1 - changelog.d/12155.misc | 1 - changelog.d/12156.misc | 1 - changelog.d/12157.bugfix | 1 - changelog.d/12159.misc | 1 - changelog.d/12161.misc | 1 - changelog.d/12163.misc | 1 - changelog.d/12173.misc | 1 - changelog.d/12175.bugfix | 1 - changelog.d/12179.doc | 1 - changelog.d/12180.misc | 1 - changelog.d/12182.misc | 1 - changelog.d/12183.misc | 1 - changelog.d/12187.misc | 1 - changelog.d/12188.misc | 1 - changelog.d/12189.bugfix | 1 - changelog.d/12192.misc | 1 - changelog.d/12196.doc | 1 - changelog.d/12197.misc | 1 - changelog.d/12200.removal | 1 - changelog.d/12202.misc | 1 - changelog.d/12203.misc | 1 - changelog.d/12204.doc | 1 - changelog.d/12206.misc | 1 - changelog.d/12207.misc | 1 - changelog.d/12208.misc | 1 - changelog.d/12210.misc | 1 - changelog.d/12211.misc | 1 - changelog.d/12212.feature | 1 - changelog.d/12215.bugfix | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 65 files changed, 89 insertions(+), 63 deletions(-) delete mode 100644 changelog.d/11700.removal delete mode 100644 changelog.d/11915.misc delete mode 100644 changelog.d/11980.misc delete mode 100644 changelog.d/11998.doc delete mode 100644 changelog.d/12028.feature delete mode 100644 changelog.d/12042.misc delete mode 100644 changelog.d/12090.bugfix delete mode 100644 changelog.d/12101.misc delete mode 100644 changelog.d/12108.misc delete 
mode 100644 changelog.d/12113.bugfix delete mode 100644 changelog.d/12118.misc delete mode 100644 changelog.d/12120.misc delete mode 100644 changelog.d/12121.bugfix delete mode 100644 changelog.d/12128.misc delete mode 100644 changelog.d/12130.bugfix delete mode 100644 changelog.d/12131.misc delete mode 100644 changelog.d/12132.feature delete mode 100644 changelog.d/12135.feature delete mode 100644 changelog.d/12136.misc delete mode 100644 changelog.d/12137.misc delete mode 100644 changelog.d/12138.removal delete mode 100644 changelog.d/12140.misc delete mode 100644 changelog.d/12142.misc delete mode 100644 changelog.d/12143.doc delete mode 100644 changelog.d/12144.misc delete mode 100644 changelog.d/12145.misc delete mode 100644 changelog.d/12146.misc delete mode 100644 changelog.d/12149.misc delete mode 100644 changelog.d/12150.misc delete mode 100644 changelog.d/12151.feature delete mode 100644 changelog.d/12152.misc delete mode 100644 changelog.d/12153.misc delete mode 100644 changelog.d/12154.misc delete mode 100644 changelog.d/12155.misc delete mode 100644 changelog.d/12156.misc delete mode 100644 changelog.d/12157.bugfix delete mode 100644 changelog.d/12159.misc delete mode 100644 changelog.d/12161.misc delete mode 100644 changelog.d/12163.misc delete mode 100644 changelog.d/12173.misc delete mode 100644 changelog.d/12175.bugfix delete mode 100644 changelog.d/12179.doc delete mode 100644 changelog.d/12180.misc delete mode 100644 changelog.d/12182.misc delete mode 100644 changelog.d/12183.misc delete mode 100644 changelog.d/12187.misc delete mode 100644 changelog.d/12188.misc delete mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12192.misc delete mode 100644 changelog.d/12196.doc delete mode 100644 changelog.d/12197.misc delete mode 100644 changelog.d/12200.removal delete mode 100644 changelog.d/12202.misc delete mode 100644 changelog.d/12203.misc delete mode 100644 changelog.d/12204.doc delete mode 100644 changelog.d/12206.misc delete mode 100644 changelog.d/12207.misc delete mode 100644 changelog.d/12208.misc delete mode 100644 changelog.d/12210.misc delete mode 100644 changelog.d/12211.misc delete mode 100644 changelog.d/12212.feature delete mode 100644 changelog.d/12215.bugfix diff --git a/CHANGES.md b/CHANGES.md index ef671e73f1..b0311f73bf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,85 @@ +Synapse 1.55.0rc1 (2022-03-15) +============================== + +Features +-------- + +- Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) +- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) +- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) +- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212)) + + +Bugfixes +-------- + +- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) +- Fix a long-standing bug when redacting events with relations. 
([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) +- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) +- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) + + +Improved Documentation +---------------------- + +- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998)) +- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143)) +- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179)) +- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196)) +- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204)) + + +Deprecations and Removals +------------------------- + +- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) + + +Internal Changes +---------------- + +- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915)) +- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980)) +- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042)) +- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101)) +- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208)) +- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118)) +- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120)) +- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128)) +- Fix CI not attaching source distributions and wheels to the GitHub releases. 
([\#12131](https://github.com/matrix-org/synapse/issues/12131)) +- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136)) +- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137)) +- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) +- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142)) +- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144)) +- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145)) +- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149)) +- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150)) +- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153)) +- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154)) +- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202)) +- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156)) +- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159)) +- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161)) +- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163)) +- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180)) +- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182)) +- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183)) +- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187)) +- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188)) +- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192)) +- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197)) +- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203)) +- Remove unnecessary `pass` statements. 
([\#12206](https://github.com/matrix-org/synapse/issues/12206)) +- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210)) +- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211)) + + Synapse 1.54.0 (2022-03-08) =========================== diff --git a/changelog.d/11700.removal b/changelog.d/11700.removal deleted file mode 100644 index d3d3c48f0f..0000000000 --- a/changelog.d/11700.removal +++ /dev/null @@ -1 +0,0 @@ -Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. diff --git a/changelog.d/11915.misc b/changelog.d/11915.misc deleted file mode 100644 index e3cef1511e..0000000000 --- a/changelog.d/11915.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the `ApplicationService` class' set of public methods related to interest checking. \ No newline at end of file diff --git a/changelog.d/11980.misc b/changelog.d/11980.misc deleted file mode 100644 index 36e992e645..0000000000 --- a/changelog.d/11980.misc +++ /dev/null @@ -1 +0,0 @@ -Add config settings for background update parameters. \ No newline at end of file diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc deleted file mode 100644 index 33ab7b7880..0000000000 --- a/changelog.d/11998.doc +++ /dev/null @@ -1 +0,0 @@ -Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. \ No newline at end of file diff --git a/changelog.d/12028.feature b/changelog.d/12028.feature deleted file mode 100644 index 5549c8f6fc..0000000000 --- a/changelog.d/12028.feature +++ /dev/null @@ -1 +0,0 @@ -Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. diff --git a/changelog.d/12042.misc b/changelog.d/12042.misc deleted file mode 100644 index 6ecdc96021..0000000000 --- a/changelog.d/12042.misc +++ /dev/null @@ -1 +0,0 @@ -Correct type hints for txredis. diff --git a/changelog.d/12090.bugfix b/changelog.d/12090.bugfix deleted file mode 100644 index 087065dcb1..0000000000 --- a/changelog.d/12090.bugfix +++ /dev/null @@ -1 +0,0 @@ -Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. diff --git a/changelog.d/12101.misc b/changelog.d/12101.misc deleted file mode 100644 index d165f73d13..0000000000 --- a/changelog.d/12101.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the size of `aggregation_key` on annotations. diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12108.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12113.bugfix b/changelog.d/12113.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12113.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc deleted file mode 100644 index a2c397d907..0000000000 --- a/changelog.d/12118.misc +++ /dev/null @@ -1 +0,0 @@ -Move scripts to Synapse package and expose as setuptools entry points. diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc deleted file mode 100644 index 3603096500..0000000000 --- a/changelog.d/12120.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for cancellation to `ReadWriteLock`. 
diff --git a/changelog.d/12121.bugfix b/changelog.d/12121.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12121.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc deleted file mode 100644 index 0570a8e327..0000000000 --- a/changelog.d/12128.misc +++ /dev/null @@ -1 +0,0 @@ -Fix data validation to compare to lists, not sequences. diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12130.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12131.misc b/changelog.d/12131.misc deleted file mode 100644 index 8ef23c22d5..0000000000 --- a/changelog.d/12131.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI not attaching source distributions and wheels to the GitHub releases. \ No newline at end of file diff --git a/changelog.d/12132.feature b/changelog.d/12132.feature deleted file mode 100644 index 3b8362ad35..0000000000 --- a/changelog.d/12132.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of logging in for large accounts. diff --git a/changelog.d/12135.feature b/changelog.d/12135.feature deleted file mode 100644 index b337f51730..0000000000 --- a/changelog.d/12135.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc deleted file mode 100644 index 98b1c1c9d8..0000000000 --- a/changelog.d/12136.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused mocks from `test_typing`. \ No newline at end of file diff --git a/changelog.d/12137.misc b/changelog.d/12137.misc deleted file mode 100644 index 118ff77a91..0000000000 --- a/changelog.d/12137.misc +++ /dev/null @@ -1 +0,0 @@ -Give `scripts-dev` scripts suffixes for neater CI config. \ No newline at end of file diff --git a/changelog.d/12138.removal b/changelog.d/12138.removal deleted file mode 100644 index 6ed84d476c..0000000000 --- a/changelog.d/12138.removal +++ /dev/null @@ -1 +0,0 @@ -Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. diff --git a/changelog.d/12140.misc b/changelog.d/12140.misc deleted file mode 100644 index 33a21a29f0..0000000000 --- a/changelog.d/12140.misc +++ /dev/null @@ -1 +0,0 @@ -Move `synctl` into `synapse._scripts` and expose as an entry point. \ No newline at end of file diff --git a/changelog.d/12142.misc b/changelog.d/12142.misc deleted file mode 100644 index 5d09f90b52..0000000000 --- a/changelog.d/12142.misc +++ /dev/null @@ -1 +0,0 @@ -Move the snapcraft configuration file to `contrib`. \ No newline at end of file diff --git a/changelog.d/12143.doc b/changelog.d/12143.doc deleted file mode 100644 index 4b9db74b1f..0000000000 --- a/changelog.d/12143.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for demo scripts. diff --git a/changelog.d/12144.misc b/changelog.d/12144.misc deleted file mode 100644 index d8f71bb203..0000000000 --- a/changelog.d/12144.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. 
diff --git a/changelog.d/12145.misc b/changelog.d/12145.misc deleted file mode 100644 index 4092a2d66e..0000000000 --- a/changelog.d/12145.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12146.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12149.misc b/changelog.d/12149.misc deleted file mode 100644 index d39af96723..0000000000 --- a/changelog.d/12149.misc +++ /dev/null @@ -1 +0,0 @@ -Add test for `ObservableDeferred`'s cancellation behaviour. diff --git a/changelog.d/12150.misc b/changelog.d/12150.misc deleted file mode 100644 index 2d2706dac7..0000000000 --- a/changelog.d/12150.misc +++ /dev/null @@ -1 +0,0 @@ -Use `ParamSpec` in type hints for `synapse.logging.context`. diff --git a/changelog.d/12151.feature b/changelog.d/12151.feature deleted file mode 100644 index 18432b2da9..0000000000 --- a/changelog.d/12151.feature +++ /dev/null @@ -1 +0,0 @@ -Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. diff --git a/changelog.d/12152.misc b/changelog.d/12152.misc deleted file mode 100644 index b9877eaccb..0000000000 --- a/changelog.d/12152.misc +++ /dev/null @@ -1 +0,0 @@ -Prune unused jobs from `tox` config. \ No newline at end of file diff --git a/changelog.d/12153.misc b/changelog.d/12153.misc deleted file mode 100644 index f02d140f38..0000000000 --- a/changelog.d/12153.misc +++ /dev/null @@ -1 +0,0 @@ -Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/changelog.d/12154.misc b/changelog.d/12154.misc deleted file mode 100644 index 18d2a4728b..0000000000 --- a/changelog.d/12154.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid generating state groups for local out-of-band leaves. diff --git a/changelog.d/12155.misc b/changelog.d/12155.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12155.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12156.misc b/changelog.d/12156.misc deleted file mode 100644 index 4818d988d7..0000000000 --- a/changelog.d/12156.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some type annotations. diff --git a/changelog.d/12157.bugfix b/changelog.d/12157.bugfix deleted file mode 100644 index c3d2e700bb..0000000000 --- a/changelog.d/12157.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. diff --git a/changelog.d/12159.misc b/changelog.d/12159.misc deleted file mode 100644 index 30500f2fd9..0000000000 --- a/changelog.d/12159.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints for `ObservableDeferred` attributes. diff --git a/changelog.d/12161.misc b/changelog.d/12161.misc deleted file mode 100644 index 43eff08d46..0000000000 --- a/changelog.d/12161.misc +++ /dev/null @@ -1 +0,0 @@ -Use a prebuilt Action for the `tests-done` CI job. diff --git a/changelog.d/12163.misc b/changelog.d/12163.misc deleted file mode 100644 index 13de0895f5..0000000000 --- a/changelog.d/12163.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of DB queries made during processing of `/sync`. 
diff --git a/changelog.d/12173.misc b/changelog.d/12173.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12173.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12175.bugfix b/changelog.d/12175.bugfix deleted file mode 100644 index 881cb9b76c..0000000000 --- a/changelog.d/12175.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. diff --git a/changelog.d/12179.doc b/changelog.d/12179.doc deleted file mode 100644 index 55d8caa45a..0000000000 --- a/changelog.d/12179.doc +++ /dev/null @@ -1 +0,0 @@ -Updates to the Room DAG concepts development document. diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc deleted file mode 100644 index 7a347352fd..0000000000 --- a/changelog.d/12180.misc +++ /dev/null @@ -1 +0,0 @@ -Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/changelog.d/12182.misc b/changelog.d/12182.misc deleted file mode 100644 index 7e9ad2c752..0000000000 --- a/changelog.d/12182.misc +++ /dev/null @@ -1 +0,0 @@ -Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc deleted file mode 100644 index dd441bb64f..0000000000 --- a/changelog.d/12183.misc +++ /dev/null @@ -1 +0,0 @@ -Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/changelog.d/12187.misc b/changelog.d/12187.misc deleted file mode 100644 index c53e68faa5..0000000000 --- a/changelog.d/12187.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused variables. diff --git a/changelog.d/12188.misc b/changelog.d/12188.misc deleted file mode 100644 index 403158481c..0000000000 --- a/changelog.d/12188.misc +++ /dev/null @@ -1 +0,0 @@ -Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix deleted file mode 100644 index df9b0dc413..0000000000 --- a/changelog.d/12189.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12192.misc b/changelog.d/12192.misc deleted file mode 100644 index bdfe8dad98..0000000000 --- a/changelog.d/12192.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. diff --git a/changelog.d/12196.doc b/changelog.d/12196.doc deleted file mode 100644 index 269f06aa33..0000000000 --- a/changelog.d/12196.doc +++ /dev/null @@ -1 +0,0 @@ -Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. \ No newline at end of file diff --git a/changelog.d/12197.misc b/changelog.d/12197.misc deleted file mode 100644 index 7d0e9b6bbf..0000000000 --- a/changelog.d/12197.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some dead code. diff --git a/changelog.d/12200.removal b/changelog.d/12200.removal deleted file mode 100644 index 312c7ae325..0000000000 --- a/changelog.d/12200.removal +++ /dev/null @@ -1 +0,0 @@ -The groups/communities feature in Synapse has been deprecated. 
diff --git a/changelog.d/12202.misc b/changelog.d/12202.misc deleted file mode 100644 index 9f333e718a..0000000000 --- a/changelog.d/12202.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12203.misc b/changelog.d/12203.misc deleted file mode 100644 index 892dc5bfb7..0000000000 --- a/changelog.d/12203.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a misleading comment in the function `check_event_for_spam`. diff --git a/changelog.d/12204.doc b/changelog.d/12204.doc deleted file mode 100644 index c4b2805bb1..0000000000 --- a/changelog.d/12204.doc +++ /dev/null @@ -1 +0,0 @@ -Document that contributors can sign off privately by email. diff --git a/changelog.d/12206.misc b/changelog.d/12206.misc deleted file mode 100644 index df59bb56cd..0000000000 --- a/changelog.d/12206.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary `pass` statements. diff --git a/changelog.d/12207.misc b/changelog.d/12207.misc deleted file mode 100644 index b67a701dbb..0000000000 --- a/changelog.d/12207.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12208.misc b/changelog.d/12208.misc deleted file mode 100644 index c5b6356799..0000000000 --- a/changelog.d/12208.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. \ No newline at end of file diff --git a/changelog.d/12210.misc b/changelog.d/12210.misc deleted file mode 100644 index 3f6a8747c2..0000000000 --- a/changelog.d/12210.misc +++ /dev/null @@ -1 +0,0 @@ -Update the SSO username picker template to comply with SIWA guidelines. diff --git a/changelog.d/12211.misc b/changelog.d/12211.misc deleted file mode 100644 index d11634a1ee..0000000000 --- a/changelog.d/12211.misc +++ /dev/null @@ -1 +0,0 @@ -Improve code documentation for the typing stream over replication. \ No newline at end of file diff --git a/changelog.d/12212.feature b/changelog.d/12212.feature deleted file mode 100644 index fe337ff990..0000000000 --- a/changelog.d/12212.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new Jinja2 template filter to extract the local part of an email address. diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix deleted file mode 100644 index 593b12556b..0000000000 --- a/changelog.d/12215.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/debian/changelog b/debian/changelog index 02136a0d60..09ef24ebb0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium + + * New synapse release 1.55.0~rc1. + + -- Synapse Packaging team Tue, 15 Mar 2022 10:59:31 +0000 + matrix-synapse-py3 (1.54.0) stable; urgency=medium * New synapse release 1.54.0. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 4b00565976..870707f476 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -68,7 +68,7 @@ try: except ImportError: pass -__version__ = "1.54.0" +__version__ = "1.55.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when -- cgit 1.5.1 From 9e90d643e6947c2fa4286c21a6351061cf510026 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:16:36 +0000 Subject: Changelog tweaks --- CHANGES.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b0311f73bf..60e7ecb1b9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,12 @@ Synapse 1.55.0rc1 (2022-03-15) ============================== +This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. + Features -------- -- Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) - Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) - Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) - Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) @@ -16,9 +18,9 @@ Bugfixes - Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) - Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) -- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) -- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) Improved Documentation @@ -34,7 +36,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. 
Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** - Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) -- cgit 1.5.1 From 5dd949bee6158a8b651db9f2ae417a62c8184bfd Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 15 Mar 2022 14:16:37 +0100 Subject: Add type hints to some tests/handlers files. (#12224) --- changelog.d/12224.misc | 1 + mypy.ini | 5 --- tests/handlers/test_directory.py | 84 +++++++++++++++++++---------------- tests/handlers/test_e2e_keys.py | 36 ++++++++------- tests/handlers/test_oidc.py | 94 +++++++++++++++++++++------------------- tests/handlers/test_profile.py | 43 ++++++++++-------- tests/handlers/test_saml.py | 24 +++++----- 7 files changed, 156 insertions(+), 131 deletions(-) create mode 100644 changelog.d/12224.misc diff --git a/changelog.d/12224.misc b/changelog.d/12224.misc new file mode 100644 index 0000000000..b67a701dbb --- /dev/null +++ b/changelog.d/12224.misc @@ -0,0 +1 @@ +Add type hints to tests files. diff --git a/mypy.ini b/mypy.ini index f9c39fcaae..fe31bfb8bb 100644 --- a/mypy.ini +++ b/mypy.ini @@ -67,13 +67,8 @@ exclude = (?x) |tests/federation/transport/test_knocking.py |tests/federation/transport/test_server.py |tests/handlers/test_cas.py - |tests/handlers/test_directory.py - |tests/handlers/test_e2e_keys.py |tests/handlers/test_federation.py - |tests/handlers/test_oidc.py |tests/handlers/test_presence.py - |tests/handlers/test_profile.py - |tests/handlers/test_saml.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 6e403a87c5..11ad44223d 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -12,14 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
- +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.api.errors import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import directory, login, room -from synapse.types import RoomAlias, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, RoomAlias, create_requester +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -28,13 +32,15 @@ from tests.test_utils import make_awaitable class DirectoryTestCase(unittest.HomeserverTestCase): """Tests the directory service.""" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -54,7 +60,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): return hs - def test_get_local_association(self): + def test_get_local_association(self) -> None: self.get_success( self.store.create_room_alias_association( self.my_room, "!8765qwer:test", ["test"] @@ -65,7 +71,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result) - def test_get_remote_association(self): + def test_get_remote_association(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} ) @@ -83,7 +89,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success( self.store.create_room_alias_association( self.your_room, "!8765asdf:test", ["test"] @@ -105,7 +111,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_directory_handler() # Create user @@ -125,7 +131,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def test_create_alias_joined_room(self): + def test_create_alias_joined_room(self) -> None: """A user can create an alias for a room they're in.""" self.get_success( self.handler.create_association( @@ -135,7 +141,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): ) ) - def test_create_alias_other_room(self): + def test_create_alias_other_room(self) -> None: """A user cannot create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok @@ -150,7 +156,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_create_alias_admin(self): + def test_create_alias_admin(self) -> None: """An admin can create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.test_user, tok=self.test_user_tok @@ -173,7 +179,7 @@ class 
TestDeleteAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -195,7 +201,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def _create_alias(self, user): + def _create_alias(self, user) -> None: # Create a new alias to this room. self.get_success( self.store.create_room_alias_association( @@ -203,7 +209,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): ) ) - def test_delete_alias_not_allowed(self): + def test_delete_alias_not_allowed(self) -> None: """A user that doesn't meet the expected guidelines cannot delete an alias.""" self._create_alias(self.admin_user) self.get_failure( @@ -213,7 +219,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.AuthError, ) - def test_delete_alias_creator(self): + def test_delete_alias_creator(self) -> None: """An alias creator can delete their own alias.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -232,7 +238,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_delete_alias_admin(self): + def test_delete_alias_admin(self) -> None: """A server admin can delete an alias created by another user.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -251,7 +257,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): synapse.api.errors.SynapseError, ) - def test_delete_alias_sufficient_power(self): + def test_delete_alias_sufficient_power(self) -> None: """A user with a sufficient power level should be able to delete an alias.""" self._create_alias(self.admin_user) @@ -288,7 +294,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -317,7 +323,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) return room_alias - def _set_canonical_alias(self, content): + def _set_canonical_alias(self, content) -> None: """Configure the canonical alias state on the room.""" self.helper.send_state( self.room_id, @@ -334,7 +340,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) ) - def test_remove_alias(self): + def test_remove_alias(self) -> None: """Removing an alias that is the canonical alias should remove it there too.""" # Set this new alias as the canonical alias for this room self._set_canonical_alias( @@ -356,7 +362,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): self.assertNotIn("alias", data["content"]) self.assertNotIn("alt_aliases", data["content"]) - def test_remove_other_alias(self): + def test_remove_other_alias(self) -> None: """Removing an alias listed as in alt_aliases should remove it there too.""" # Create a second alias. 
other_test_alias = "#test2:test" @@ -393,7 +399,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom alias creation rules to the config. @@ -403,7 +409,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): return config - def test_denied(self): + def test_denied(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -413,7 +419,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): ) self.assertEqual(403, channel.code, channel.result) - def test_allowed(self): + def test_allowed(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -423,7 +429,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code, channel.result) - def test_denied_during_creation(self): + def test_denied_during_creation(self) -> None: """A room alias that is not allowed should be rejected during creation.""" # Invalid room alias. self.helper.create_room_as( @@ -432,7 +438,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): extra_content={"room_alias_name": "foo"}, ) - def test_allowed_during_creation(self): + def test_allowed_during_creation(self) -> None: """A valid room alias should be allowed during creation.""" room_id = self.helper.create_room_as( self.user_id, @@ -459,7 +465,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): data = {"room_alias_name": "unofficial_test"} allowed_localpart = "allowed" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom room list publication rules to the config. @@ -474,7 +480,9 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return config - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: self.allowed_user_id = self.register_user(self.allowed_localpart, "pass") self.allowed_access_token = self.login(self.allowed_localpart, "pass") @@ -483,7 +491,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): return hs - def test_denied_without_publication_permission(self): + def test_denied_without_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user without permission to publish rooms. @@ -497,7 +505,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=403, ) - def test_allowed_when_creating_private_room(self): + def test_allowed_when_creating_private_room(self) -> None: """ Try to create a room, register an alias for it, and NOT publish it, as a user without permission to publish rooms. @@ -511,7 +519,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=200, ) - def test_allowed_with_publication_permission(self): + def test_allowed_with_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. @@ -525,7 +533,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=200, ) - def test_denied_publication_with_invalid_alias(self): + def test_denied_publication_with_invalid_alias(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. 
@@ -538,7 +546,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): expect_code=403, ) - def test_can_create_as_private_room_after_rejection(self): + def test_can_create_as_private_room_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as the same user, but without publishing the room. @@ -549,7 +557,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): self.test_denied_without_publication_permission() self.test_allowed_when_creating_private_room() - def test_can_create_with_permission_after_rejection(self): + def test_can_create_with_permission_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as someone with permission, using the same alias. @@ -566,7 +574,9 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -579,7 +589,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): return hs - def test_disabling_room_list(self): + def test_disabling_room_list(self) -> None: self.room_list_handler.enable_room_list_search = True self.directory_handler.enable_room_list_search = True diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 9338ab92e9..ac21a28c43 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -20,33 +20,37 @@ from parameterized import parameterized from signedjson import key as key, sign as sign from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=mock.Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main - def test_query_local_devices_no_devices(self): + def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" local_user = "@boris:" + self.hs.hostname res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_reupload_one_time_keys(self): + def test_reupload_one_time_keys(self) -> None: """we should be able to re-upload the same keys""" local_user = "@boris:" + self.hs.hostname device_id = "xyz" - keys = { + keys: JsonDict = { "alg1:k1": "key1", "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, "alg2:k3": {"key": "key3"}, @@ -74,7 +78,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} ) - def test_change_one_time_keys(self): + def test_change_one_time_keys(self) -> None: """attempts to change 
one-time-keys should be rejected""" local_user = "@boris:" + self.hs.hostname @@ -134,7 +138,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_claim_one_time_key(self): + def test_claim_one_time_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" keys = {"alg1:k1": "key1"} @@ -161,7 +165,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) - def test_fallback_key(self): + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} @@ -294,7 +298,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) - def test_replace_master_key(self): + def test_replace_master_key(self) -> None: """uploading a new signing key should make the old signing key unavailable""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -328,7 +332,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) - def test_reupload_signatures(self): + def test_reupload_signatures(self) -> None: """re-uploading a signature should not fail""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -433,7 +437,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): self.assertDictEqual(devices["device_keys"][local_user]["abc"], device_key_1) self.assertDictEqual(devices["device_keys"][local_user]["def"], device_key_2) - def test_self_signing_key_doesnt_show_up_as_device(self): + def test_self_signing_key_doesnt_show_up_as_device(self) -> None: """signing keys should be hidden when fetching a user's devices""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -462,7 +466,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_upload_signatures(self): + def test_upload_signatures(self) -> None: """should check signatures that are uploaded""" # set up a user with cross-signing keys and a device. This user will # try uploading signatures @@ -686,7 +690,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey], ) - def test_query_devices_remote_no_sync(self): + def test_query_devices_remote_no_sync(self) -> None: """Tests that querying keys for a remote user that we don't share a room with returns the cross signing keys correctly. """ @@ -759,7 +763,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) - def test_query_devices_remote_sync(self): + def test_query_devices_remote_sync(self) -> None: """Tests that querying keys for a remote user that we share a room with, but haven't yet fetched the keys for, returns the cross signing keys correctly. @@ -845,7 +849,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): (["device_1", "device_2"],), ] ) - def test_query_all_devices_caches_result(self, device_ids: Iterable[str]): + def test_query_all_devices_caches_result(self, device_ids: Iterable[str]) -> None: """Test that requests for all of a remote user's devices are cached. 
We do this by asserting that only one call over federation was made, and that @@ -853,7 +857,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): """ local_user_id = "@test:test" remote_user_id = "@test:other" - request_body = {"device_keys": {remote_user_id: []}} + request_body: JsonDict = {"device_keys": {remote_user_id: []}} response_devices = [ { diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index e8418b6638..014815db6e 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -13,14 +13,18 @@ # limitations under the License. import json import os +from typing import Any, Dict from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse import pymacaroons +from twisted.test.proto_helpers import MemoryReactor + from synapse.handlers.sso import MappingException from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from synapse.util.macaroons import get_value_from_macaroon from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock @@ -98,7 +102,7 @@ class TestMappingProviderFailures(TestMappingProvider): } -async def get_json(url): +async def get_json(url: str) -> JsonDict: # Mock get_json calls to handle jwks & oidc discovery endpoints if url == WELL_KNOWN: # Minimal discovery document, as defined in OpenID.Discovery @@ -116,6 +120,8 @@ async def get_json(url): elif url == JWKS_URI: return {"keys": []} + return {} + def _key_file_path() -> str: """path to a file containing the private half of a test key""" @@ -147,12 +153,12 @@ class OidcHandlerTestCase(HomeserverTestCase): if not HAS_OIDC: skip = "requires OIDC" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock(spec=["get_json"]) self.http_client.get_json.side_effect = get_json self.http_client.user_agent = b"Synapse Test" @@ -164,7 +170,7 @@ class OidcHandlerTestCase(HomeserverTestCase): sso_handler = hs.get_sso_handler() # Mock the render error method. self.render_error = Mock(return_value=None) - sso_handler.render_error = self.render_error + sso_handler.render_error = self.render_error # type: ignore[assignment] # Reduce the number of attempts when generating MXIDs. 
sso_handler._MAP_USERNAME_RETRIES = 3 @@ -193,14 +199,14 @@ class OidcHandlerTestCase(HomeserverTestCase): return args @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_config(self): + def test_config(self) -> None: """Basic config correctly sets up the callback URL and client auth correctly.""" self.assertEqual(self.provider._callback_url, CALLBACK_URL) self.assertEqual(self.provider._client_auth.client_id, CLIENT_ID) self.assertEqual(self.provider._client_auth.client_secret, CLIENT_SECRET) @override_config({"oidc_config": {**DEFAULT_CONFIG, "discover": True}}) - def test_discovery(self): + def test_discovery(self) -> None: """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid metadata = self.get_success(self.provider.load_metadata()) @@ -219,13 +225,13 @@ class OidcHandlerTestCase(HomeserverTestCase): self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_no_discovery(self): + def test_no_discovery(self) -> None: """When discovery is disabled, it should not try to load from discovery document.""" self.get_success(self.provider.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_load_jwks(self): + def test_load_jwks(self) -> None: """JWKS loading is done once (then cached) if used.""" jwks = self.get_success(self.provider.load_jwks()) self.http_client.get_json.assert_called_once_with(JWKS_URI) @@ -253,7 +259,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.get_failure(self.provider.load_jwks(force=True), RuntimeError) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_validate_config(self): + def test_validate_config(self) -> None: """Provider metadatas are extensively validated.""" h = self.provider @@ -336,14 +342,14 @@ class OidcHandlerTestCase(HomeserverTestCase): force_load_metadata() @override_config({"oidc_config": {**DEFAULT_CONFIG, "skip_verification": True}}) - def test_skip_verification(self): + def test_skip_verification(self) -> None: """Provider metadata validation can be disabled by config.""" with self.metadata_edit({"issuer": "http://insecure"}): # This should not throw get_awaitable_result(self.provider.load_metadata()) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_redirect_request(self): + def test_redirect_request(self) -> None: """The redirect request has the right arguments & generates a valid session cookie.""" req = Mock(spec=["cookies"]) req.cookies = [] @@ -387,7 +393,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(redirect, "http://client/redirect") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_error(self): + def test_callback_error(self) -> None: """Errors from the provider returned in the callback are displayed.""" request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] @@ -399,7 +405,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertRenderedError("invalid_client", "some description") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback(self): + def test_callback(self) -> None: """Code callback works and display errors if something went wrong. 
A lot of scenarios are tested here: @@ -428,9 +434,9 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": username, } expected_user_id = "@%s:%s" % (username, self.hs.hostname) - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) - self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -468,7 +474,7 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertRenderedError("mapping_error") # Handle ID token errors - self.provider._parse_id_token = simple_async_mock(raises=Exception()) + self.provider._parse_id_token = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") @@ -483,7 +489,7 @@ class OidcHandlerTestCase(HomeserverTestCase): "type": "bearer", "access_token": "access_token", } - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) auth_handler.complete_sso_login.assert_called_once_with( @@ -510,8 +516,8 @@ class OidcHandlerTestCase(HomeserverTestCase): id_token = { "sid": "abcdefgh", } - self.provider._parse_id_token = simple_async_mock(return_value=id_token) - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._parse_id_token = simple_async_mock(return_value=id_token) # type: ignore[assignment] + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] auth_handler.complete_sso_login.reset_mock() self.provider._fetch_userinfo.reset_mock() self.get_success(self.handler.handle_oidc_callback(request)) @@ -531,21 +537,21 @@ class OidcHandlerTestCase(HomeserverTestCase): self.render_error.assert_not_called() # Handle userinfo fetching error - self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) + self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("fetch_error") # Handle code exchange failure from synapse.handlers.oidc import OidcError - self.provider._exchange_code = simple_async_mock( + self.provider._exchange_code = simple_async_mock( # type: ignore[assignment] raises=OidcError("invalid_request") ) self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_session(self): + def test_callback_session(self) -> None: """The callback verifies the session presence and validity""" request = Mock(spec=["args", "getCookie", "cookies"]) @@ -590,7 +596,7 @@ class OidcHandlerTestCase(HomeserverTestCase): @override_config( {"oidc_config": {**DEFAULT_CONFIG, "client_auth_method": "client_secret_post"}} ) - def test_exchange_code(self): + def test_exchange_code(self) -> None: """Code exchange behaves correctly and handles various error scenarios.""" token = {"type": 
"bearer"} token_json = json.dumps(token).encode("utf-8") @@ -686,7 +692,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_exchange_code_jwt_key(self): + def test_exchange_code_jwt_key(self) -> None: """Test that code exchange works with a JWK client secret.""" from authlib.jose import jwt @@ -741,7 +747,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_exchange_code_no_auth(self): + def test_exchange_code_no_auth(self) -> None: """Test that code exchange works with no client secret.""" token = {"type": "bearer"} self.http_client.request = simple_async_mock( @@ -776,7 +782,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_extra_attributes(self): + def test_extra_attributes(self) -> None: """ Login while using a mapping provider that implements get_extra_attributes. """ @@ -790,8 +796,8 @@ class OidcHandlerTestCase(HomeserverTestCase): "username": "foo", "phone": "1234567", } - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -817,12 +823,12 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_user(self): + def test_map_userinfo_to_user(self) -> None: """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() - userinfo = { + userinfo: dict = { "sub": "test_user", "username": "test_user", } @@ -870,7 +876,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}}) - def test_map_userinfo_to_existing_user(self): + def test_map_userinfo_to_existing_user(self) -> None: """Existing users can log in with OpenID Connect when allow_existing_users is True.""" store = self.hs.get_datastores().main user = UserID.from_string("@test_user:test") @@ -974,7 +980,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_invalid_localpart(self): + def test_map_userinfo_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" self.get_success( _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"}) @@ -991,7 +997,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_map_userinfo_to_user_retries(self): + def test_map_userinfo_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1039,7 +1045,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_empty_localpart(self): + def test_empty_localpart(self) -> None: """Attempts to map onto an empty localpart should be rejected.""" userinfo = { "sub": "tester", @@ -1058,7 +1064,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_null_localpart(self): + def test_null_localpart(self) -> None: """Mapping onto a null localpart via an empty OIDC 
attribute should be rejected""" userinfo = { "sub": "tester", @@ -1075,7 +1081,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: """The required attributes must be met from the OIDC userinfo response.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1115,7 +1121,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements_contains(self): + def test_attribute_requirements_contains(self) -> None: """Test that auth succeeds if userinfo attribute CONTAINS required value""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1146,7 +1152,7 @@ class OidcHandlerTestCase(HomeserverTestCase): } } ) - def test_attribute_requirements_mismatch(self): + def test_attribute_requirements_mismatch(self) -> None: """ Test that auth fails if attributes exist but don't match, or are non-string values. @@ -1154,7 +1160,7 @@ class OidcHandlerTestCase(HomeserverTestCase): auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() # userinfo with "test": "not_foobar" attribute should fail - userinfo = { + userinfo: dict = { "sub": "tester", "username": "tester", "test": "not_foobar", @@ -1248,9 +1254,9 @@ async def _make_callback_with_userinfo( handler = hs.get_oidc_handler() provider = handler._providers["oidc"] - provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) - provider._parse_id_token = simple_async_mock(return_value=userinfo) - provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) # type: ignore[assignment] + provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] state = "state" session = handler._token_generator.generate_oidc_session_token( diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 972cbac6e4..08733a9f2d 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.types from synapse.api.errors import AuthError, SynapseError from synapse.rest import admin from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -29,13 +32,15 @@ class ProfileTestCase(unittest.HomeserverTestCase): servlets = [admin.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -47,7 +52,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) return hs - def prepare(self, reactor, clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.frank = UserID.from_string("@1234abcd:test") @@ -58,7 +63,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.handler = hs.get_profile_handler() - def test_get_my_name(self): + def test_get_my_name(self) -> None: self.get_success( self.store.set_profile_displayname(self.frank.localpart, "Frank") ) @@ -67,7 +72,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual("Frank", displayname) - def test_set_my_name(self): + def test_set_my_name(self) -> None: self.get_success( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.frank), "Frank Jr." @@ -110,7 +115,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.get_success(self.store.get_profile_displayname(self.frank.localpart)) ) - def test_set_my_name_if_disabled(self): + def test_set_my_name_if_disabled(self) -> None: self.hs.config.registration.enable_set_displayname = False # Setting displayname for the first time is allowed @@ -135,7 +140,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_set_my_name_noauth(self): + def test_set_my_name_noauth(self) -> None: self.get_failure( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.bob), "Frank Jr." 
@@ -143,7 +148,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): AuthError, ) - def test_get_other_name(self): + def test_get_other_name(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"displayname": "Alice"} ) @@ -158,7 +163,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success(self.store.create_profile("caroline")) self.get_success(self.store.set_profile_displayname("caroline", "Caroline")) @@ -174,7 +179,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual({"displayname": "Caroline"}, response) - def test_get_my_avatar(self): + def test_get_my_avatar(self) -> None: self.get_success( self.store.set_profile_avatar_url( self.frank.localpart, "http://my.server/me.png" @@ -184,7 +189,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertEqual("http://my.server/me.png", avatar_url) - def test_set_my_avatar(self): + def test_set_my_avatar(self) -> None: self.get_success( self.handler.set_avatar_url( self.frank, @@ -225,7 +230,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), ) - def test_set_my_avatar_if_disabled(self): + def test_set_my_avatar_if_disabled(self) -> None: self.hs.config.registration.enable_set_avatar_url = False # Setting displayname for the first time is allowed @@ -250,7 +255,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): SynapseError, ) - def test_avatar_constraints_no_config(self): + def test_avatar_constraints_no_config(self) -> None: """Tests that the method to check an avatar against configured constraints skips all of its check if no constraint is configured. """ @@ -263,7 +268,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertTrue(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_missing(self): + def test_avatar_constraints_missing(self) -> None: """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't be found. """ @@ -273,7 +278,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertFalse(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_file_size(self): + def test_avatar_constraints_file_size(self) -> None: """Tests that a file that's above the allowed file size is forbidden but one that's below it is allowed. """ @@ -295,7 +300,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.assertFalse(res) @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) - def test_avatar_constraint_mime_type(self): + def test_avatar_constraint_mime_type(self) -> None: """Tests that a file with an unauthorised MIME type is forbidden but one with an authorised content type is allowed. """ diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 23941abed8..8d4404eda1 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional +from typing import Any, Dict, Optional from unittest.mock import Mock import attr +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import RedirectException +from synapse.server import HomeServer +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -81,10 +85,10 @@ class TestRedirectMappingProvider(TestMappingProvider): class SamlHandlerTestCase(HomeserverTestCase): - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - saml_config = { + saml_config: Dict[str, Any] = { "sp_config": {"metadata": {}}, # Disable grandfathering. "grandfathered_mxid_source_attribute": None, @@ -98,7 +102,7 @@ class SamlHandlerTestCase(HomeserverTestCase): return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.handler = hs.get_saml_handler() @@ -114,7 +118,7 @@ class SamlHandlerTestCase(HomeserverTestCase): elif not has_xmlsec1: skip = "Requires xmlsec1" - def test_map_saml_response_to_user(self): + def test_map_saml_response_to_user(self) -> None: """Ensure that mapping the SAML response returned from a provider to an MXID works properly.""" # stub out the auth handler @@ -140,7 +144,7 @@ class SamlHandlerTestCase(HomeserverTestCase): ) @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}}) - def test_map_saml_response_to_existing_user(self): + def test_map_saml_response_to_existing_user(self) -> None: """Existing users can log in with SAML account.""" store = self.hs.get_datastores().main self.get_success( @@ -186,7 +190,7 @@ class SamlHandlerTestCase(HomeserverTestCase): auth_provider_session_id=None, ) - def test_map_saml_response_to_invalid_localpart(self): + def test_map_saml_response_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" # stub out the auth handler @@ -207,7 +211,7 @@ class SamlHandlerTestCase(HomeserverTestCase): ) auth_handler.complete_sso_login.assert_not_called() - def test_map_saml_response_to_user_retries(self): + def test_map_saml_response_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" # stub out the auth handler and error renderer @@ -271,7 +275,7 @@ class SamlHandlerTestCase(HomeserverTestCase): } } ) - def test_map_saml_response_redirect(self): + def test_map_saml_response_redirect(self) -> None: """Test a mapping provider that raises a RedirectException""" saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) @@ -292,7 +296,7 @@ class SamlHandlerTestCase(HomeserverTestCase): }, } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: """The required attributes must be met from the SAML response.""" # stub out the auth handler -- cgit 1.5.1 From dea577998f221297d3ff30bdf904f7147f3c3d8a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 15 Mar 2022 15:40:34 +0000 Subject: Add tests for database transaction callbacks (#12198) Signed-off-by: Sean Quah --- changelog.d/12198.misc | 1 + tests/storage/test_database.py | 104 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12198.misc diff 
--git a/changelog.d/12198.misc b/changelog.d/12198.misc new file mode 100644 index 0000000000..6b184a9053 --- /dev/null +++ b/changelog.d/12198.misc @@ -0,0 +1 @@ +Add tests for database transaction callbacks. diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 8597867563..ae13bed086 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -12,7 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.database import make_tuple_comparison_clause +from typing import Callable, Tuple +from unittest.mock import Mock, call + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import ( + DatabasePool, + LoggingTransaction, + make_tuple_comparison_clause, +) +from synapse.util import Clock from tests import unittest @@ -22,3 +33,94 @@ class TupleComparisonClauseTestCase(unittest.TestCase): clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) self.assertEqual(clause, "(a,b) > (?,?)") self.assertEqual(args, [1, 2]) + + +class CallbacksTestCase(unittest.HomeserverTestCase): + """Tests for transaction callbacks.""" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.db_pool: DatabasePool = self.store.db_pool + + def _run_interaction( + self, func: Callable[[LoggingTransaction], object] + ) -> Tuple[Mock, Mock]: + """Run the given function in a database transaction, with callbacks registered. + + Args: + func: The function to be run in a transaction. The transaction will be + retried if `func` raises an `OperationalError`. + + Returns: + Two mocks, which were registered as an `after_callback` and an + `exception_callback` respectively, on every transaction attempt. + """ + after_callback = Mock() + exception_callback = Mock() + + def _test_txn(txn: LoggingTransaction) -> None: + txn.call_after(after_callback, 123, 456, extra=789) + txn.call_on_exception(exception_callback, 987, 654, extra=321) + func(txn) + + try: + self.get_success_or_raise( + self.db_pool.runInteraction("test_transaction", _test_txn) + ) + except Exception: + pass + + return after_callback, exception_callback + + def test_after_callback(self) -> None: + """Test that the after callback is called when a transaction succeeds.""" + after_callback, exception_callback = self._run_interaction(lambda txn: None) + + after_callback.assert_called_once_with(123, 456, extra=789) + exception_callback.assert_not_called() + + def test_exception_callback(self) -> None: + """Test that the exception callback is called when a transaction fails.""" + _test_txn = Mock(side_effect=ZeroDivisionError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_called_once_with(987, 654, extra=321) + + def test_failed_retry(self) -> None: + """Test that the exception callback is called for every failed attempt.""" + # Always raise an `OperationalError`. 
+ _test_txn = Mock(side_effect=self.db_pool.engine.module.OperationalError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_has_calls( + [ + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + ] + ) + self.assertEqual(exception_callback.call_count, 6) # no additional calls + + def test_successful_retry(self) -> None: + """Test callbacks for a failed transaction followed by a successful attempt.""" + # Raise an `OperationalError` on the first attempt only. + _test_txn = Mock( + side_effect=[self.db_pool.engine.module.OperationalError, None] + ) + after_callback, exception_callback = self._run_interaction(_test_txn) + + # Calling both `after_callback`s when the first attempt failed is rather + # surprising (#12184). Let's document the behaviour in a test. + after_callback.assert_has_calls( + [ + call(123, 456, extra=789), + call(123, 456, extra=789), + ] + ) + self.assertEqual(after_callback.call_count, 2) # no additional calls + exception_callback.assert_not_called() -- cgit 1.5.1 From dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Mar 2022 14:06:05 -0400 Subject: Use the ignored_users table to test event visibility & sync. (#12225) Instead of fetching the raw account data and re-parsing it. The ignored_users table is a denormalised version of the account data for quick searching. --- changelog.d/12225.misc | 1 + synapse/handlers/sync.py | 30 ++----------------- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/storage/databases/main/account_data.py | 41 ++++++++++++++++++++++++-- synapse/visibility.py | 18 ++--------- tests/storage/test_account_data.py | 17 +++++++++++ 6 files changed, 62 insertions(+), 47 deletions(-) create mode 100644 changelog.d/12225.misc diff --git a/changelog.d/12225.misc b/changelog.d/12225.misc new file mode 100644 index 0000000000..23105c727c --- /dev/null +++ b/changelog.d/12225.misc @@ -0,0 +1 @@ +Use the `ignored_users` table in additional places instead of re-parsing the account data. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0aa3052fd6..c9d6a18bd7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -28,7 +28,7 @@ from typing import ( import attr from prometheus_client import Counter -from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes +from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -1601,7 +1601,7 @@ class SyncHandler: return set(), set(), set(), set() # 3. Work out which rooms need reporting in the sync response. 
- ignored_users = await self._get_ignored_users(user_id) + ignored_users = await self.store.ignored_users(user_id) if since_token: room_changes = await self._get_rooms_changed( sync_result_builder, ignored_users @@ -1627,7 +1627,6 @@ class SyncHandler: logger.debug("Generating room entry for %s", room_entry.room_id) await self._generate_room_entry( sync_result_builder, - ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), @@ -1657,29 +1656,6 @@ class SyncHandler: newly_left_users, ) - async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]: - """Retrieve the users ignored by the given user from their global account_data. - - Returns an empty set if - - there is no global account_data entry for ignored_users - - there is such an entry, but it's not a JSON object. - """ - # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead? - ignored_account_data = ( - await self.store.get_global_account_data_by_type_for_user( - user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST - ) - ) - - # If there is ignored users account data and it matches the proper type, - # then use it. - ignored_users: FrozenSet[str] = frozenset() - if ignored_account_data: - ignored_users_data = ignored_account_data.get("ignored_users", {}) - if isinstance(ignored_users_data, dict): - ignored_users = frozenset(ignored_users_data.keys()) - return ignored_users - async def _have_rooms_changed( self, sync_result_builder: "SyncResultBuilder" ) -> bool: @@ -2022,7 +1998,6 @@ class SyncHandler: async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", - ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], @@ -2051,7 +2026,6 @@ class SyncHandler: Args: sync_result_builder - ignored_users: Set of users ignored by user. room_builder ephemeral: List of new ephemeral events for room tags: List of *all* tags for room, or None if there has been diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8140afcb6b..030898e4d0 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -213,7 +213,7 @@ class BulkPushRuleEvaluator: if not event.is_state(): ignorers = await self.store.ignored_by(event.sender) else: - ignorers = set() + ignorers = frozenset() for uid, rules in rules_by_user.items(): if event.sender == uid: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 52146aacc8..9af9f4f18e 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -14,7 +14,17 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Tuple, + cast, +) from synapse.api.constants import AccountDataTypes from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker @@ -365,7 +375,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) @cached(max_entries=5000, iterable=True) - async def ignored_by(self, user_id: str) -> Set[str]: + async def ignored_by(self, user_id: str) -> FrozenSet[str]: """ Get users which ignore the given user. 
@@ -375,7 +385,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) Return: The user IDs which ignore the given user. """ - return set( + return frozenset( await self.db_pool.simple_select_onecol( table="ignored_users", keyvalues={"ignored_user_id": user_id}, @@ -384,6 +394,26 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) ) + @cached(max_entries=5000, iterable=True) + async def ignored_users(self, user_id: str) -> FrozenSet[str]: + """ + Get users which the given user ignores. + + Params: + user_id: The user ID which is making the request. + + Return: + The user IDs which are ignored by the given user. + """ + return frozenset( + await self.db_pool.simple_select_onecol( + table="ignored_users", + keyvalues={"ignorer_user_id": user_id}, + retcol="ignored_user_id", + desc="ignored_users", + ) + ) + def process_replication_rows( self, stream_name: str, @@ -529,6 +559,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) else: currently_ignored_users = set() + # If the data has not changed, nothing to do. + if previously_ignored_users == currently_ignored_users: + return + # Delete entries which are no longer ignored. self.db_pool.simple_delete_many_txn( txn, @@ -551,6 +585,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) # Invalidate the cache for any ignored users which were added or removed. for ignored_user_id in previously_ignored_users ^ currently_ignored_users: self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,)) + self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,)) async def purge_account_data_for_user(self, user_id: str) -> None: """ diff --git a/synapse/visibility.py b/synapse/visibility.py index 281cbe4d88..49519eb8f5 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -14,12 +14,7 @@ import logging from typing import Dict, FrozenSet, List, Optional -from synapse.api.constants import ( - AccountDataTypes, - EventTypes, - HistoryVisibility, - Membership, -) +from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.events import EventBase from synapse.events.utils import prune_event from synapse.storage import Storage @@ -87,15 +82,8 @@ async def filter_events_for_client( state_filter=StateFilter.from_types(types), ) - ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.IGNORED_USER_LIST - ) - - ignore_list: FrozenSet[str] = frozenset() - if ignore_dict_content: - ignored_users_dict = ignore_dict_content.get("ignored_users", {}) - if isinstance(ignored_users_dict, dict): - ignore_list = frozenset(ignored_users_dict.keys()) + # Get the users who are ignored by the requesting user. 
+ ignore_list = await storage.main.ignored_users(user_id) erased_senders = await storage.main.are_users_erased(e.sender for e in events) diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 272cd35402..72bf5b3d31 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -47,9 +47,18 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): expected_ignorer_user_ids, ) + def assert_ignored( + self, ignorer_user_id: str, expected_ignored_user_ids: Set[str] + ) -> None: + self.assertEqual( + self.get_success(self.store.ignored_users(ignorer_user_id)), + expected_ignored_user_ids, + ) + def test_ignoring_users(self): """Basic adding/removing of users from the ignore list.""" self._update_ignore_list("@other:test", "@another:remote") + self.assert_ignored(self.user, {"@other:test", "@another:remote"}) # Check a user which no one ignores. self.assert_ignorers("@user:test", set()) @@ -62,6 +71,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): # Add one user, remove one user, and leave one user. self._update_ignore_list("@foo:test", "@another:remote") + self.assert_ignored(self.user, {"@foo:test", "@another:remote"}) # Check the removed user. self.assert_ignorers("@other:test", set()) @@ -76,20 +86,24 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): """Ensure that caching works properly between different users.""" # The first user ignores a user. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # The second user ignores them. self._update_ignore_list("@other:test", ignorer_user_id="@second:test") + self.assert_ignored("@second:test", {"@other:test"}) self.assert_ignorers("@other:test", {self.user, "@second:test"}) # The first user un-ignores them. self._update_ignore_list() + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", {"@second:test"}) def test_invalid_data(self): """Invalid data ends up clearing out the ignored users list.""" # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # No ignored_users key. @@ -102,10 +116,12 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # Invalid data. @@ -118,4 +134,5 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) -- cgit 1.5.1 From 4587b35929d22731644a11120a9e7d6a9c3bc304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 07:21:36 -0400 Subject: Clean-up logic for rebasing URLs during URL preview. (#12219) By using urljoin from the standard library and reducing the number of places URLs are rebased. 
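The diff below swaps the bespoke `rebase_url` helper for the standard library's `urljoin`. A short sketch of how `urljoin` resolves the cases the old helper documented; note that its behaviour can differ from the removed helper in edge cases such as trailing slashes:

from urllib.parse import urljoin

# A relative path is resolved under the base URL's final directory.
assert urljoin("https://example.com/foo/", "subpage") == "https://example.com/foo/subpage"
# With a file-like base, the last path component is replaced.
assert urljoin("https://example.com/foo", "sibling") == "https://example.com/sibling"
# An absolute path replaces the base path entirely.
assert urljoin("https://example.com/foo/", "/bar") == "https://example.com/bar"
# A fully-qualified URL is returned as-is.
assert urljoin("https://example.com/foo/", "https://alice.com/a/") == "https://alice.com/a/"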
--- changelog.d/12219.misc | 1 + synapse/rest/media/v1/preview_html.py | 39 +------------------ synapse/rest/media/v1/preview_url_resource.py | 23 ++++++------ tests/rest/media/v1/test_html_preview.py | 54 ++++++--------------------- 4 files changed, 26 insertions(+), 91 deletions(-) create mode 100644 changelog.d/12219.misc diff --git a/changelog.d/12219.misc b/changelog.d/12219.misc new file mode 100644 index 0000000000..6079414092 --- /dev/null +++ b/changelog.d/12219.misc @@ -0,0 +1 @@ +Clean-up logic around rebasing URLs for URL image previews. diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py index 872a9e72e8..4cc9c66fbe 100644 --- a/synapse/rest/media/v1/preview_html.py +++ b/synapse/rest/media/v1/preview_html.py @@ -16,7 +16,6 @@ import itertools import logging import re from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Union -from urllib import parse as urlparse if TYPE_CHECKING: from lxml import etree @@ -144,9 +143,7 @@ def decode_body( return etree.fromstring(body, parser) -def parse_html_to_open_graph( - tree: "etree.Element", media_uri: str -) -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -155,7 +152,6 @@ def parse_html_to_open_graph( Args: tree: The parsed HTML document. - media_url: The URI used to download the body. Returns: The Open Graph response as a dictionary. @@ -209,7 +205,7 @@ def parse_html_to_open_graph( "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content" ) if meta_image: - og["og:image"] = rebase_url(meta_image[0], media_uri) + og["og:image"] = meta_image[0] else: # TODO: consider inlined CSS styles as well as width & height attribs images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") @@ -320,37 +316,6 @@ def _iterate_over_text( ) -def rebase_url(url: str, base: str) -> str: - """ - Resolves a potentially relative `url` against an absolute `base` URL. - - For example: - - >>> rebase_url("subpage", "https://example.com/foo/") - 'https://example.com/foo/subpage' - >>> rebase_url("sibling", "https://example.com/foo") - 'https://example.com/sibling' - >>> rebase_url("/bar", "https://example.com/foo/") - 'https://example.com/bar' - >>> rebase_url("https://alice.com/a/", "https://example.com/foo/") - 'https://alice.com/a' - """ - base_parts = urlparse.urlparse(base) - # Convert the parsed URL to a list for (potential) modification. - url_parts = list(urlparse.urlparse(url)) - # Add a scheme, if one does not exist. - if not url_parts[0]: - url_parts[0] = base_parts.scheme or "http" - # Fix up the hostname, if this is not a data URL. - if url_parts[0] != "data" and not url_parts[1]: - url_parts[1] = base_parts.netloc - # If the path does not start with a /, nest it under the base path's last - # directory. 
-    if not url_parts[2].startswith("/"):
-        url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2]
-    return urlparse.urlunparse(url_parts)
-
-
 def summarize_paragraphs(
     text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500
 ) -> Optional[str]:
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 14ea88b240..d47af8ead6 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -22,7 +22,7 @@ import shutil
 import sys
 import traceback
 from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
-from urllib import parse as urlparse
+from urllib.parse import urljoin, urlparse, urlsplit
 from urllib.request import urlopen
 
 import attr
@@ -44,11 +44,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.media.v1._base import get_filename_from_headers
 from synapse.rest.media.v1.media_storage import MediaStorage
 from synapse.rest.media.v1.oembed import OEmbedProvider
-from synapse.rest.media.v1.preview_html import (
-    decode_body,
-    parse_html_to_open_graph,
-    rebase_url,
-)
+from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph
 from synapse.types import JsonDict, UserID
 from synapse.util import json_encoder
 from synapse.util.async_helpers import ObservableDeferred
@@ -187,7 +183,7 @@ class PreviewUrlResource(DirectServeJsonResource):
         ts = self.clock.time_msec()
 
         # XXX: we could move this into _do_preview if we wanted.
-        url_tuple = urlparse.urlsplit(url)
+        url_tuple = urlsplit(url)
         for entry in self.url_preview_url_blacklist:
             match = True
             for attrib in entry:
@@ -322,7 +318,7 @@ class PreviewUrlResource(DirectServeJsonResource):
 
         # Parse Open Graph information from the HTML in case the oEmbed
         # response failed or is incomplete.
-        og_from_html = parse_html_to_open_graph(tree, media_info.uri)
+        og_from_html = parse_html_to_open_graph(tree)
 
         # Compile the Open Graph response by using the scraped
         # information from the HTML and overlaying any information
@@ -588,12 +584,17 @@ class PreviewUrlResource(DirectServeJsonResource):
         if "og:image" not in og or not og["og:image"]:
             return
 
+        # The image URL from the HTML might be relative to the previewed page;
+        # convert it to a URL which can be requested directly.
+        image_url = og["og:image"]
+        url_parts = urlparse(image_url)
+        if url_parts.scheme != "data":
+            image_url = urljoin(media_info.uri, image_url)
+
         # FIXME: it might be cleaner to use the same flow as the main /preview_url
         # request itself and benefit from the same caching etc. But for now we
         # just rely on the caching on the master request to speed things up.
- image_info = await self._handle_url( - rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True - ) + image_info = await self._handle_url(image_url, user, allow_data_urls=True) if _is_media(image_info.media_type): # TODO: make sure we don't choke on white-on-transparent images diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py index 3fb37a2a59..62e308814d 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -16,7 +16,6 @@ from synapse.rest.media.v1.preview_html import ( _get_html_media_encodings, decode_body, parse_html_to_open_graph, - rebase_url, summarize_paragraphs, ) @@ -161,7 +160,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -177,7 +176,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -196,7 +195,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -218,7 +217,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -232,7 +231,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -247,7 +246,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -262,7 +261,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -290,7 +289,7 @@ class CalcOgTestCase(unittest.TestCase): FooSome text. 
""".strip() tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding(self) -> None: @@ -304,7 +303,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html", "invalid-encoding") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding2(self) -> None: @@ -319,7 +318,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) def test_windows_1252(self) -> None: @@ -333,7 +332,7 @@ class CalcOgTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) @@ -448,34 +447,3 @@ class MediaEncodingTestCase(unittest.TestCase): 'text/html; charset="invalid"', ) self.assertEqual(list(encodings), ["utf-8", "cp1252"]) - - -class RebaseUrlTestCase(unittest.TestCase): - def test_relative(self) -> None: - """Relative URLs should be resolved based on the context of the base URL.""" - self.assertEqual( - rebase_url("subpage", "https://example.com/foo/"), - "https://example.com/foo/subpage", - ) - self.assertEqual( - rebase_url("sibling", "https://example.com/foo"), - "https://example.com/sibling", - ) - self.assertEqual( - rebase_url("/bar", "https://example.com/foo/"), - "https://example.com/bar", - ) - - def test_absolute(self) -> None: - """Absolute URLs should not be modified.""" - self.assertEqual( - rebase_url("https://alice.com/a/", "https://example.com/foo/"), - "https://alice.com/a/", - ) - - def test_data(self) -> None: - """Data URLs should not be modified.""" - self.assertEqual( - rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"), - "data:,Hello%2C%20World%21", - ) -- cgit 1.5.1 From 1da0f79d5455b594f2aa989106a672786f5b990f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 09:20:57 -0400 Subject: Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class. --- changelog.d/12232.misc | 1 + tests/rest/client/test_relations.py | 1389 +++++++++++++++++------------------ 2 files changed, 674 insertions(+), 716 deletions(-) create mode 100644 changelog.d/12232.misc diff --git a/changelog.d/12232.misc b/changelog.d/12232.misc new file mode 100644 index 0000000000..4a4132edff --- /dev/null +++ b/changelog.d/12232.misc @@ -0,0 +1 @@ +Refactor relations tests to improve code re-use. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 0cbe6c0cf7..3dbd1304a8 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -79,6 +79,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase):
         content: Optional[dict] = None,
         access_token: Optional[str] = None,
         parent_id: Optional[str] = None,
+        expected_response_code: int = 200,
     ) -> FakeChannel:
         """Helper function to send a relation pointing at `self.parent_id`
 
@@ -115,16 +116,50 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase):
             content,
             access_token=access_token,
         )
+        self.assertEqual(expected_response_code, channel.code, channel.json_body)
         return channel
 
+    def _get_related_events(self) -> List[str]:
+        """
+        Requests /relations on the parent ID and returns a list of event IDs.
+        """
+        # Request the relations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return [ev["event_id"] for ev in channel.json_body["chunk"]]
+
+    def _get_bundled_aggregations(self) -> JsonDict:
+        """
+        Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists.
+        """
+        # Fetch the bundled aggregations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return channel.json_body["unsigned"].get("m.relations", {})
+
+    def _get_aggregations(self) -> List[JsonDict]:
+        """Requests /aggregations on the parent ID and returns the chunk."""
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        return channel.json_body["chunk"]
+
 
 class RelationsTestCase(BaseRelationsTestCase):
     def test_send_relation(self) -> None:
         """Tests that sending a relation works."""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
-        self.assertEqual(200, channel.code, channel.json_body)
-
         event_id = channel.json_body["event_id"]
 
         channel = self.make_request(
@@ -151,13 +186,13 @@ class RelationsTestCase(BaseRelationsTestCase):
 
     def test_deny_invalid_event(self) -> None:
         """Test that we deny relations on non-existent events"""
-        channel = self._send_relation(
+        self._send_relation(
            RelationTypes.ANNOTATION,
            EventTypes.Message,
            parent_id="foo",
            content={"body": "foo", "msgtype": "m.text"},
+            expected_response_code=400,
        )
-        self.assertEqual(400, channel.code, channel.json_body)
 
         # Unless that event is referenced from another event!
         self.get_success(
@@ -171,13 +206,12 @@ class RelationsTestCase(BaseRelationsTestCase):
             desc="test_deny_invalid_event",
         )
         )
-        channel = self._send_relation(
+        self._send_relation(
            RelationTypes.THREAD,
            EventTypes.Message,
            parent_id="foo",
            content={"body": "foo", "msgtype": "m.text"},
        )
-        self.assertEqual(200, channel.code, channel.json_body)
 
     def test_deny_invalid_room(self) -> None:
         """Test that we deny relations on events in other rooms"""
@@ -187,18 +221,20 @@ class RelationsTestCase(BaseRelationsTestCase):
         parent_id = res["event_id"]
 
         # Attempt to send an annotation to that event.
- channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", parent_id=parent_id, key="A" + self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + parent_id=parent_id, + key="A", + expected_response_code=400, ) - self.assertEqual(400, channel.code, channel.json_body) def test_deny_double_react(self) -> None: """Test that we deny relations on membership events""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(400, channel.code, channel.json_body) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", expected_response_code=400 + ) def test_deny_forked_thread(self) -> None: """It is invalid to start a thread off a thread.""" @@ -208,461 +244,160 @@ class RelationsTestCase(BaseRelationsTestCase): content={"msgtype": "m.text", "body": "foo"}, parent_id=self.parent_id, ) - self.assertEqual(200, channel.code, channel.json_body) parent_id = channel.json_body["event_id"] - channel = self._send_relation( + self._send_relation( RelationTypes.THREAD, "m.room.message", content={"msgtype": "m.text", "body": "foo"}, parent_id=parent_id, + expected_response_code=400, ) - self.assertEqual(400, channel.code, channel.json_body) - def test_basic_paginate_relations(self) -> None: - """Tests that calling pagination API correctly the latest relations.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - first_annotation_id = channel.json_body["event_id"] + def test_aggregation(self) -> None: + """Test that annotations get correctly aggregated.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - second_annotation_id = channel.json_body["event_id"] + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") channel = self.make_request( "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the latest - # full relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( + self.assertEqual( + channel.json_body, { - "event_id": second_annotation_id, - "sender": self.user_id, - "type": "m.reaction", + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] }, - channel.json_body["chunk"][0], - ) - - # We also expect to get the original event (the id of which is self.parent_id) - self.assertEqual( - channel.json_body["original_event"]["event_id"], self.parent_id ) - # Make sure next_batch has something in it that looks like it could be a - # valid token. 
- self.assertIsInstance( - channel.json_body.get("next_batch"), str, channel.json_body - ) + def test_aggregation_must_be_annotation(self) -> None: + """Test that aggregations must be annotations.""" - # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations" + f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", access_token=self.user_token, ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(400, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the earliest - # full relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - { - "event_id": first_annotation_id, - "sender": self.user_id, - "type": "m.reaction", - }, - channel.json_body["chunk"][0], - ) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_bundled_aggregations(self) -> None: + """ + Test that annotations, references, and threads get correctly bundled. - def test_repeated_paginate_relations(self) -> None: - """Test that if we paginate using a limit and tokens then we get the - expected events. + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + + See test_edit for a similar test for edits. """ + # Setup by sending a variety of relations. + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - expected_event_ids = [] - for idx in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_1 = channel.json_body["event_id"] - prev_token = "" - found_event_ids: List[str] = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_2 = channel.json_body["event_id"] - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self._send_relation(RelationTypes.THREAD, "m.room.test") - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - next_batch = channel.json_body.get("next_batch") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") - if not prev_token: - break + # Ensure the fields are as expected. 
+ self.assertCountEqual( + relations_dict.keys(), + ( + RelationTypes.ANNOTATION, + RelationTypes.REFERENCE, + RelationTypes.THREAD, + ), + ) - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) + # Check the values of each field. + self.assertEqual( + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + relations_dict[RelationTypes.ANNOTATION], + ) - def test_pagination_from_sync_and_messages(self) -> None: - """Pagination tokens from /sync and /messages can be used to paginate /relations.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_id = channel.json_body["event_id"] - # Send an event after the relation events. - self.helper.send(self.room, body="Latest event", tok=self.user_token) + self.assertEqual( + {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, + relations_dict[RelationTypes.REFERENCE], + ) - # Request /sync, limiting it such that only the latest event is returned - # (and not the relation). - filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + self.assertEqual( + 2, + relations_dict[RelationTypes.THREAD].get("count"), + ) + self.assertTrue( + relations_dict[RelationTypes.THREAD].get("current_user_participated") + ) + # The latest thread event has some fields that don't matter. + self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + relations_dict[RelationTypes.THREAD].get("latest_event"), + ) + + # Request the event directly. channel = self.make_request( - "GET", f"/sync?filter={filter}", access_token=self.user_token + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - sync_prev_batch = room_timeline["prev_batch"] - self.assertIsNotNone(sync_prev_batch) - # Ensure the relation event is not in the batch returned from /sync. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in room_timeline["events"]] - ) + assert_bundle(channel.json_body) - # Request /messages, limiting it such that only the latest event is - # returned (and not the relation). + # Request the room messages. channel = self.make_request( "GET", - f"/rooms/{self.room}/messages?dir=b&limit=1", + f"/rooms/{self.room}/messages?dir=b", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - messages_end = channel.json_body["end"] - self.assertIsNotNone(messages_end) - # Ensure the relation event is not in the chunk returned from /messages. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, ) - - # Request /relations with the pagination tokens received from both the - # /sync and /messages responses above, in turn. - # - # This is a tiny bit silly since the client wouldn't know the parent ID - # from the requests above; consider the parent ID to be known from a - # previous /sync. 
- for from_token in (sync_prev_batch, messages_end): - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # The relation should be in the returned chunk. - self.assertIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] - ) - - def test_aggregation_pagination_groups(self) -> None: - """Test that we can paginate annotation groups correctly.""" - - # We need to create ten separate users to send each reaction. - access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} - for key in itertools.chain.from_iterable( - itertools.repeat(key, num) for key, num in sent_groups.items() - ): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key=key, - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - - idx += 1 - idx %= len(access_tokens) - - prev_token: Optional[str] = None - found_groups: Dict[str, int] = {} - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - for groups in channel.json_body["chunk"]: - # We only expect reactions - self.assertEqual(groups["type"], "m.reaction", channel.json_body) - - # We should only see each key once - self.assertNotIn(groups["key"], found_groups, channel.json_body) - - found_groups[groups["key"]] = groups["count"] - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - self.assertEqual(sent_groups, found_groups) - - def test_aggregation_pagination_within_group(self) -> None: - """Test that we can paginate within an annotation group.""" - - # We need to create ten separate users to send each reaction. 
- access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - expected_event_ids = [] - for _ in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="👍", - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) - - idx += 1 - - # Also send a different type of reaction so that we test we don't see it - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEqual(200, channel.code, channel.json_body) - - prev_token = "" - found_event_ids: List[str] = [] - encoded_key = urllib.parse.quote_plus("👍".encode()) - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}" - f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" - f"/m.reaction/{encoded_key}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - - def test_aggregation(self) -> None: - """Test that annotations get correctly aggregated.""" - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual( - channel.json_body, - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - ) - - def test_aggregation_must_be_annotation(self) -> None: - """Test that aggregations must be annotations.""" - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations" - f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(400, channel.code, channel.json_body) - - @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_bundled_aggregations(self) -> None: - """ - Test that annotations, references, and threads get correctly bundled. - - Note that this doesn't test against /relations since only thread relations - get bundled via that API. See test_aggregation_get_event_for_thread. - - See test_edit for a similar test for edits. - """ - # Setup by sending a variety of relations. 
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_1 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_2 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_2 = channel.json_body["event_id"] - - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") - - # Ensure the fields are as expected. - self.assertCountEqual( - relations_dict.keys(), - ( - RelationTypes.ANNOTATION, - RelationTypes.REFERENCE, - RelationTypes.THREAD, - ), - ) - - # Check the values of each field. - self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - relations_dict[RelationTypes.ANNOTATION], - ) - - self.assertEqual( - {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - relations_dict[RelationTypes.REFERENCE], - ) - - self.assertEqual( - 2, - relations_dict[RelationTypes.THREAD].get("count"), - ) - self.assertTrue( - relations_dict[RelationTypes.THREAD].get("current_user_participated") - ) - # The latest thread event has some fields that don't matter. - self.assert_dict( - { - "content": { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": RelationTypes.THREAD, - } - }, - "event_id": thread_2, - "sender": self.user_id, - "type": "m.room.test", - }, - relations_dict[RelationTypes.THREAD].get("latest_event"), - ) - - # Request the event directly. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body) - - # Request the room messages. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/messages?dir=b", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - - # Request the room context. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) # Request sync. channel = self.make_request("GET", "/sync", access_token=self.user_token) @@ -693,14 +428,12 @@ class RelationsTestCase(BaseRelationsTestCase): when directly requested. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -713,14 +446,12 @@ class RelationsTestCase(BaseRelationsTestCase): def test_aggregation_get_event_for_thread(self) -> None: """Test that threads get bundled aggregations included when directly requested.""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) thread_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -877,8 +608,6 @@ class RelationsTestCase(BaseRelationsTestCase): "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] def assert_bundle(event_json: JsonDict) -> None: @@ -954,7 +683,7 @@ class RelationsTestCase(BaseRelationsTestCase): shouldn't be allowed, are correctly handled. """ - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={ @@ -963,309 +692,575 @@ class RelationsTestCase(BaseRelationsTestCase): "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + ) + edit_event_id = channel.json_body["event_id"] + + self._send_relation( + RelationTypes.REPLACE, + "m.room.message.WRONG_TYPE", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"}, + }, + ) + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertEqual(channel.json_body["content"], new_body) + + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_edit_reply(self) -> None: + """Test that editing a reply works.""" + + # Create a reply to edit. 
+        channel = self._send_relation(
+            RelationTypes.REFERENCE,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "A reply!"},
+        )
+        reply = channel.json_body["event_id"]
+
+        new_body = {"msgtype": "m.text", "body": "I've been edited!"}
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
+            parent_id=reply,
+        )
+        edit_event_id = channel.json_body["event_id"]
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{reply}",
+            access_token=self.user_token,
+        )
         self.assertEqual(200, channel.code, channel.json_body)
 
+        # We expect to see the new body in the dict, as well as the reference
+        # metadata still intact.
+        self.assertDictContainsSubset(new_body, channel.json_body["content"])
+        self.assertDictContainsSubset(
+            {
+                "m.relates_to": {
+                    "event_id": self.parent_id,
+                    "rel_type": "m.reference",
+                }
+            },
+            channel.json_body["content"],
+        )
+
+        # We expect that the edit relation appears in the unsigned relations
+        # section.
+        relations_dict = channel.json_body["unsigned"].get("m.relations")
+        self.assertIn(RelationTypes.REPLACE, relations_dict)
+
+        m_replace_dict = relations_dict[RelationTypes.REPLACE]
+        for key in ["event_id", "sender", "origin_server_ts"]:
+            self.assertIn(key, m_replace_dict)
+
+        self.assert_dict(
+            {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+        )
+
+    def test_edit_thread(self) -> None:
+        """Test that editing a thread works."""
+
+        # Create a thread and edit the last event.
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "A threaded reply!"},
+        )
+        threaded_event_id = channel.json_body["event_id"]
+
         new_body = {"msgtype": "m.text", "body": "I've been edited!"}
         channel = self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
+            parent_id=threaded_event_id,
+        )
+
+        # Fetch the thread root, to get the bundled aggregation for the thread.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # We expect that the edit message appears in the thread summary in the
+        # unsigned relations section.
+        relations_dict = channel.json_body["unsigned"].get("m.relations")
+        self.assertIn(RelationTypes.THREAD, relations_dict)
+
+        thread_summary = relations_dict[RelationTypes.THREAD]
+        self.assertIn("latest_event", thread_summary)
+        latest_event_in_thread = thread_summary["latest_event"]
+        self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!")
+
+    def test_edit_edit(self) -> None:
+        """Test that an edit cannot be edited."""
+        new_body = {"msgtype": "m.text", "body": "Initial edit"}
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={
+                "msgtype": "m.text",
+                "body": "Wibble",
+                "m.new_content": new_body,
+            },
+        )
+        edit_event_id = channel.json_body["event_id"]
+
+        # Edit the edit event.
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={
+                "msgtype": "m.text",
+                "body": "foo",
+                "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"},
+            },
+            parent_id=edit_event_id,
+        )
+
+        # Request the original event.
+ channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + # The edit to the edit should be ignored. + self.assertEqual(channel.json_body["content"], new_body) + + # The relations information should not include the edit to the edit. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_unknown_relations(self) -> None: + """Unknown relations should be accepted.""" + channel = self._send_relation("m.relation.test", "m.room.test") + event_id = channel.json_body["event_id"] + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # We expect to get back a single pagination result, which is the full + # relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, + channel.json_body["chunk"][0], + ) + + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id + ) + + # When bundling the unknown relation is not included. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertNotIn("m.relations", channel.json_body["unsigned"]) + + # But unknown relations can be directly queried. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) + + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. + """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + + def test_background_update(self) -> None: + """Test the event_arbitrary_relations background update.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + annotation_event_id_good = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") + annotation_event_id_bad = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_event_id = channel.json_body["event_id"] + + # Clean-up the table as if the inserts did not happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="event_relations", + column="event_id", + iterable=(annotation_event_id_bad, thread_event_id), + keyvalues={}, + desc="RelationsTestCase.test_background_update", + ) + ) + + # Only the "good" annotation should be found. 
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
+            access_token=self.user_token,
         )
         self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(
+            [ev["event_id"] for ev in channel.json_body["chunk"]],
+            [annotation_event_id_good],
+        )
 
-        edit_event_id = channel.json_body["event_id"]
-
-        channel = self._send_relation(
-            RelationTypes.REPLACE,
-            "m.room.message.WRONG_TYPE",
-            content={
-                "msgtype": "m.text",
-                "body": "Wibble",
-                "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"},
-            },
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {"update_name": "event_arbitrary_relations", "progress_json": "{}"},
+            )
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
+        # Ugh, have to reset this flag
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # The "good" annotation and the thread should be found, but not the "bad"
+        # annotation.
         channel = self.make_request(
             "GET",
-            f"/rooms/{self.room}/event/{self.parent_id}",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
             access_token=self.user_token,
         )
         self.assertEqual(200, channel.code, channel.json_body)
+        self.assertCountEqual(
+            [ev["event_id"] for ev in channel.json_body["chunk"]],
+            [annotation_event_id_good, thread_event_id],
+        )
 
-        self.assertEqual(channel.json_body["content"], new_body)
 
-        relations_dict = channel.json_body["unsigned"].get("m.relations")
-        self.assertIn(RelationTypes.REPLACE, relations_dict)
+class RelationPaginationTestCase(BaseRelationsTestCase):
+    def test_basic_paginate_relations(self) -> None:
+        """Tests that calling the pagination API correctly returns the latest relations."""
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        first_annotation_id = channel.json_body["event_id"]
 
-        m_replace_dict = relations_dict[RelationTypes.REPLACE]
-        for key in ["event_id", "sender", "origin_server_ts"]:
-            self.assertIn(key, m_replace_dict)
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
+        second_annotation_id = channel.json_body["event_id"]
 
-        self.assert_dict(
-            {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
         )
+        self.assertEqual(200, channel.code, channel.json_body)
 
-    def test_edit_reply(self) -> None:
-        """Test that editing a reply works."""
-
-        # Create a reply to edit.
+        # We expect to get back a single pagination result, which is the latest
+        # full relation event we sent above.
+ self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + { + "event_id": second_annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, + channel.json_body["chunk"][0], ) - self.assertEqual(200, channel.code, channel.json_body) - reply = channel.json_body["event_id"] - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=reply, + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + # Make sure next_batch has something in it that looks like it could be a + # valid token. + self.assertIsInstance( + channel.json_body.get("next_batch"), str, channel.json_body + ) + # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{reply}", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - # We expect to see the new body in the dict, as well as the reference - # metadata sill intact. - self.assertDictContainsSubset(new_body, channel.json_body["content"]) - self.assertDictContainsSubset( + # We expect to get back a single pagination result, which is the earliest + # full relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": "m.reference", - } + "event_id": first_annotation_id, + "sender": self.user_id, + "type": "m.reaction", }, - channel.json_body["content"], + channel.json_body["chunk"][0], ) - # We expect that the edit relation appears in the unsigned relations - # section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + def test_repeated_paginate_relations(self) -> None: + """Test that if we paginate using a limit and tokens then we get the + expected events. + """ - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + expected_event_ids = [] + for idx in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) + ) + expected_event_ids.append(channel.json_body["event_id"]) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + prev_token = "" + found_event_ids: List[str] = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - def test_edit_thread(self) -> None: - """Test that editing a thread works.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Create a thread and edit the last event. 
- channel = self._send_relation( - RelationTypes.THREAD, - "m.room.message", - content={"msgtype": "m.text", "body": "A threaded reply!"}, - ) - self.assertEqual(200, channel.code, channel.json_body) - threaded_event_id = channel.json_body["event_id"] + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=threaded_event_id, + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) + + def test_pagination_from_sync_and_messages(self) -> None: + """Pagination tokens from /sync and /messages can be used to paginate /relations.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") + annotation_id = channel.json_body["event_id"] + # Send an event after the relation events. + self.helper.send(self.room, body="Latest event", tok=self.user_token) + + # Request /sync, limiting it such that only the latest event is returned + # (and not the relation). + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token ) self.assertEqual(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + sync_prev_batch = room_timeline["prev_batch"] + self.assertIsNotNone(sync_prev_batch) + # Ensure the relation event is not in the batch returned from /sync. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in room_timeline["events"]] + ) - # Fetch the thread root, to get the bundled aggregation for the thread. + # Request /messages, limiting it such that only the latest event is + # returned (and not the relation). channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{self.parent_id}", + f"/rooms/{self.room}/messages?dir=b&limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) + messages_end = channel.json_body["end"] + self.assertIsNotNone(messages_end) + # Ensure the relation event is not in the chunk returned from /messages. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) - # We expect that the edit message appears in the thread summary in the - # unsigned relations section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.THREAD, relations_dict) + # Request /relations with the pagination tokens received from both the + # /sync and /messages responses above, in turn. + # + # This is a tiny bit silly since the client wouldn't know the parent ID + # from the requests above; consider the parent ID to be known from a + # previous /sync. 
+ for from_token in (sync_prev_batch, messages_end): + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - thread_summary = relations_dict[RelationTypes.THREAD] - self.assertIn("latest_event", thread_summary) - latest_event_in_thread = thread_summary["latest_event"] - self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") + # The relation should be in the returned chunk. + self.assertIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) + + def test_aggregation_pagination_groups(self) -> None: + """Test that we can paginate annotation groups correctly.""" + + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 + + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) + + idx = 0 + sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} + for key in itertools.chain.from_iterable( + itertools.repeat(key, num) for key, num in sent_groups.items() + ): + self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key=key, + access_token=access_tokens[idx], + ) + + idx += 1 + idx %= len(access_tokens) - def test_edit_edit(self) -> None: - """Test that an edit cannot be edited.""" - new_body = {"msgtype": "m.text", "body": "Initial edit"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": new_body, - }, - ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + prev_token: Optional[str] = None + found_groups: Dict[str, int] = {} + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Edit the edit event. - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "foo", - "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"}, - }, - parent_id=edit_event_id, - ) - self.assertEqual(200, channel.code, channel.json_body) + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Request the original event. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - # The edit to the edit should be ignored. - self.assertEqual(channel.json_body["content"], new_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # The relations information should not include the edit to the edit. 
- relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + for groups in channel.json_body["chunk"]: + # We only expect reactions + self.assertEqual(groups["type"], "m.reaction", channel.json_body) - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + # We should only see each key once + self.assertNotIn(groups["key"], found_groups, channel.json_body) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + found_groups[groups["key"]] = groups["count"] - def test_unknown_relations(self) -> None: - """Unknown relations should be accepted.""" - channel = self._send_relation("m.relation.test", "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - event_id = channel.json_body["event_id"] + next_batch = channel.json_body.get("next_batch") - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch - # We expect to get back a single pagination result, which is the full - # relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, - channel.json_body["chunk"][0], - ) + if not prev_token: + break - # We also expect to get the original event (the id of which is self.parent_id) - self.assertEqual( - channel.json_body["original_event"]["event_id"], self.parent_id - ) + self.assertEqual(sent_groups, found_groups) - # When bundling the unknown relation is not included. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertNotIn("m.relations", channel.json_body["unsigned"]) + def test_aggregation_pagination_within_group(self) -> None: + """Test that we can paginate within an annotation group.""" - # But unknown relations can be directly queried. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. 
- """ - for event in events: - if event["event_id"] == self.parent_id: - return event + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) - raise AssertionError(f"Event {self.parent_id} not found in chunk") + idx = 0 + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key="👍", + access_token=access_tokens[idx], + ) + expected_event_ids.append(channel.json_body["event_id"]) - def test_background_update(self) -> None: - """Test the event_arbitrary_relations background update.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_good = channel.json_body["event_id"] + idx += 1 - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_bad = channel.json_body["event_id"] + # Also send a different type of reaction so that we test we don't see it + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_event_id = channel.json_body["event_id"] + prev_token = "" + found_event_ids: List[str] = [] + encoded_key = urllib.parse.quote_plus("👍".encode()) + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Clean-up the table as if the inserts did not happen during event creation. - self.get_success( - self.store.db_pool.simple_delete_many( - table="event_relations", - column="event_id", - iterable=(annotation_event_id_bad, thread_event_id), - keyvalues={}, - desc="RelationsTestCase.test_background_update", + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}" + f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" + f"/m.reaction/{encoded_key}?limit=1{from_token}", + access_token=self.user_token, ) - ) + self.assertEqual(200, channel.code, channel.json_body) - # Only the "good" annotation should be found. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good], - ) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # Insert and run the background update. - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - {"update_name": "event_arbitrary_relations", "progress_json": "{}"}, - ) - ) + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - # Ugh, have to reset this flag - self.store.db_pool.updates._all_done = False - self.wait_for_background_updates() + next_batch = channel.json_body.get("next_batch") - # The "good" annotation and the thread should be found, but not the "bad" - # annotation. 
- channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertCountEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good, thread_event_id], - ) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) class RelationRedactionTestCase(BaseRelationsTestCase): @@ -1294,46 +1289,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ) self.assertEqual(200, channel.code, channel.json_body) - def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: - """ - Makes requests and ensures they result in a 200 response, returns a - tuple of results: - - 1. `/relations` -> Returns a list of event IDs. - 2. `/event` -> Returns the response's m.relations field (from unsigned), - if it exists. - """ - - # Request the relations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] - - # Fetch the bundled aggregations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) - - return event_ids, bundled_relations - - def _get_aggregations(self) -> List[JsonDict]: - """Request /aggregations on the parent ID and includes the returned chunk.""" - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - return channel.json_body["chunk"] - def test_redact_relation_annotation(self) -> None: """ Test that annotations of an event are properly handled after the @@ -1343,17 +1298,16 @@ class RelationRedactionTestCase(BaseRelationsTestCase): the response to relations. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Both relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1368,7 +1322,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(to_redact_event_id) # The unredacted relation should still exist. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1391,7 +1346,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Note that the *last* event in the thread is redacted, as that gets @@ -1401,11 +1355,11 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 2", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] # Both relations exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertDictContainsSubset( { @@ -1424,7 +1378,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(to_redact_event_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertDictContainsSubset( { @@ -1444,7 +1399,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): is redacted. """ # Add a relation - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", parent_id=self.parent_id, @@ -1454,10 +1409,10 @@ class RelationRedactionTestCase(BaseRelationsTestCase): "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) - self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.REPLACE, relations) @@ -1465,7 +1420,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(self.parent_id) # The relations are not returned. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 0) self.assertEqual(relations, {}) @@ -1475,11 +1431,11 @@ class RelationRedactionTestCase(BaseRelationsTestCase): """ # Add a relation channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # The relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.ANNOTATION, relations) @@ -1491,7 +1447,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(self.parent_id) # The relations are returned. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [related_event_id]) self.assertEquals( relations["m.annotation"], @@ -1512,14 +1469,14 @@ class RelationRedactionTestCase(BaseRelationsTestCase): EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # Redact one of the reactions. self._redact(self.parent_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(len(event_ids), 1) self.assertDictContainsSubset( { -- cgit 1.5.1 From 86965605a4688d80dc0a74ed4993a52f282e970a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 16 Mar 2022 13:52:59 +0000 Subject: Fix dead link in spam checker warning (#12231) --- changelog.d/12231.doc | 1 + synapse/config/spam_checker.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12231.doc diff --git a/changelog.d/12231.doc b/changelog.d/12231.doc new file mode 100644 index 0000000000..16593d2b92 --- /dev/null +++ b/changelog.d/12231.doc @@ -0,0 +1 @@ +Fix the link to the module documentation in the legacy spam checker warning message. diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index a233a9ce03..4c52103b1c 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -25,8 +25,8 @@ logger = logging.getLogger(__name__) LEGACY_SPAM_CHECKER_WARNING = """ This server is using a spam checker module that is implementing the deprecated spam checker interface. Please check with the module's maintainer to see if a new version -supporting Synapse's generic modules system is available. -For more information, please see https://matrix-org.github.io/synapse/latest/modules.html +supporting Synapse's generic modules system is available. For more information, please +see https://matrix-org.github.io/synapse/latest/modules/index.html ---------------------------------------------------------------------------------------""" -- cgit 1.5.1 From c486fa5fd9082643e40a55ffa59d902aa6db4c2b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:37:04 -0400 Subject: Add some missing type hints to cache datastore. (#12216) --- changelog.d/12216.misc | 1 + synapse/storage/databases/main/cache.py | 57 +++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12216.misc diff --git a/changelog.d/12216.misc b/changelog.d/12216.misc new file mode 100644 index 0000000000..dc398ac1e0 --- /dev/null +++ b/changelog.d/12216.misc @@ -0,0 +1 @@ +Add missing type hints for cache storage. 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index d6a2df1afe..2d7511d613 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import ( EventsStream, EventsStreamCurrentStateRow, EventsStreamEventRow, + EventsStreamRow, ) from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -31,6 +32,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.util.caches.descriptors import _CachedFunction from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -82,7 +84,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): if last_id == current_id: return [], current_id, False - def get_all_updated_caches_txn(txn): + def get_all_updated_caches_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: # We purposefully don't bound by the current token, as we want to # send across cache invalidations as quickly as possible. Cache # invalidations are idempotent, so duplicates are fine. @@ -107,7 +111,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_all_updated_caches", get_all_updated_caches_txn ) - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == EventsStream.NAME: for row in rows: self._process_event_stream_row(token, row) @@ -142,10 +148,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): super().process_replication_rows(stream_name, instance_name, token, rows) - def _process_event_stream_row(self, token, row): + def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None: data = row.data if row.type == EventsStreamEventRow.TypeId: + assert isinstance(data, EventsStreamEventRow) self._invalidate_caches_for_event( token, data.event_id, @@ -157,9 +164,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): backfilled=False, ) elif row.type == EventsStreamCurrentStateRow.TypeId: - self._curr_state_delta_stream_cache.entity_has_changed( - row.data.room_id, token - ) + assert isinstance(data, EventsStreamCurrentStateRow) + self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) if data.type == EventTypes.Member: self.get_rooms_for_user_with_stream_ordering.invalidate( @@ -170,15 +176,15 @@ class CacheInvalidationWorkerStore(SQLBaseStore): def _invalidate_caches_for_event( self, - stream_ordering, - event_id, - room_id, - etype, - state_key, - redacts, - relates_to, - backfilled, - ): + stream_ordering: int, + event_id: str, + room_id: str, + etype: str, + state_key: Optional[str], + redacts: Optional[str], + relates_to: Optional[str], + backfilled: bool, + ) -> None: self._invalidate_get_event_cache(event_id) self.have_seen_event.invalidate((room_id, event_id)) @@ -207,7 +213,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self.get_thread_summary.invalidate((relates_to,)) self.get_thread_participated.invalidate((relates_to,)) - async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): + async def invalidate_cache_and_stream( + self, cache_name: str, keys: Tuple[Any, ...] + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. 
@@ -227,7 +235,12 @@ class CacheInvalidationWorkerStore(SQLBaseStore): keys, ) - def _invalidate_cache_and_stream(self, txn, cache_func, keys): + def _invalidate_cache_and_stream( + self, + txn: LoggingTransaction, + cache_func: _CachedFunction, + keys: Tuple[Any, ...], + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -238,7 +251,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): txn.call_after(cache_func.invalidate, keys) self._send_invalidation_to_replication(txn, cache_func.__name__, keys) - def _invalidate_all_cache_and_stream(self, txn, cache_func): + def _invalidate_all_cache_and_stream( + self, txn: LoggingTransaction, cache_func: _CachedFunction + ) -> None: """Invalidates the entire cache and adds it to the cache stream so slaves will know to invalidate their caches. """ @@ -279,8 +294,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) def _send_invalidation_to_replication( - self, txn, cache_name: str, keys: Optional[Iterable[Any]] - ): + self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]] + ) -> None: """Notifies replication that given cache has been invalidated. Note that this does *not* invalidate the cache locally. @@ -315,7 +330,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "instance_name": self._instance_name, "cache_func": cache_name, "keys": keys, - "invalidation_ts": self.clock.time_msec(), + "invalidation_ts": self._clock.time_msec(), }, ) -- cgit 1.5.1 From fc9bd620ce94b64af46737e25a524336967782a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:39:15 -0400 Subject: Add a relations handler to avoid duplication. (#12227) Adds a handler layer between the REST and datastore layers for relations. --- changelog.d/12227.misc | 1 + synapse/handlers/pagination.py | 5 +- synapse/handlers/relations.py | 117 +++++++++++++++++++++++++++++++++++++++ synapse/rest/client/relations.py | 75 +++---------------------- synapse/server.py | 5 ++ 5 files changed, 134 insertions(+), 69 deletions(-) create mode 100644 changelog.d/12227.misc create mode 100644 synapse/handlers/relations.py diff --git a/changelog.d/12227.misc b/changelog.d/12227.misc new file mode 100644 index 0000000000..41c9dcbd37 --- /dev/null +++ b/changelog.d/12227.misc @@ -0,0 +1 @@ +Refactor the relations endpoints to add a `RelationsHandler`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 60059fec3e..41679f7f86 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set import attr @@ -422,7 +422,7 @@ class PaginationHandler: pagin_config: PaginationConfig, as_client_event: bool = True, event_filter: Optional[Filter] = None, - ) -> Dict[str, Any]: + ) -> JsonDict: """Get messages in a room. Args: @@ -431,6 +431,7 @@ class PaginationHandler: pagin_config: The pagination config rules to apply, if any. as_client_event: True to get events in client-server format. 
event_filter: Filter to apply to results or None
+
         Returns:
             Pagination API results
         """
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
new file mode 100644
index 0000000000..8e475475ad
--- /dev/null
+++ b/synapse/handlers/relations.py
@@ -0,0 +1,117 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional
+
+from synapse.api.errors import SynapseError
+from synapse.types import JsonDict, Requester, StreamToken
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+
+class RelationsHandler:
+    def __init__(self, hs: "HomeServer"):
+        self._main_store = hs.get_datastores().main
+        self._auth = hs.get_auth()
+        self._clock = hs.get_clock()
+        self._event_handler = hs.get_event_handler()
+        self._event_serializer = hs.get_event_client_serializer()
+
+    async def get_relations(
+        self,
+        requester: Requester,
+        event_id: str,
+        room_id: str,
+        relation_type: Optional[str] = None,
+        event_type: Optional[str] = None,
+        aggregation_key: Optional[str] = None,
+        limit: int = 5,
+        direction: str = "b",
+        from_token: Optional[StreamToken] = None,
+        to_token: Optional[StreamToken] = None,
+    ) -> JsonDict:
+        """Get related events of an event, ordered by topological ordering.
+
+        TODO Accept a PaginationConfig instead of individual pagination parameters.
+
+        Args:
+            requester: The user requesting the relations.
+            event_id: Fetch events that relate to this event ID.
+            room_id: The room the event belongs to.
+            relation_type: Only fetch events with this relation type, if given.
+            event_type: Only fetch events with this event type, if given.
+            aggregation_key: Only fetch events with this aggregation key, if given.
+            limit: Only fetch the most recent `limit` events.
+            direction: Whether to fetch the most recent first (`"b"`) or the
+                oldest first (`"f"`).
+            from_token: Fetch rows from the given token, or from the start if None.
+            to_token: Fetch rows up to the given token, or up to the end if None.
+
+        Returns:
+            The pagination chunk.
+        """
+
+        user_id = requester.user.to_string()
+
+        await self._auth.check_user_in_room_or_world_readable(
+            room_id, user_id, allow_departed_users=True
+        )
+
+        # This gets the original event and checks that a) the event exists and
+        # b) the user is allowed to view it.
+ event = await self._event_handler.get_event(requester.user, room_id, event_id) + if event is None: + raise SynapseError(404, "Unknown parent event.") + + pagination_chunk = await self._main_store.get_relations_for_event( + event_id=event_id, + event=event, + room_id=room_id, + relation_type=relation_type, + event_type=event_type, + aggregation_key=aggregation_key, + limit=limit, + direction=direction, + from_token=from_token, + to_token=to_token, + ) + + events = await self._main_store.get_events_as_list( + [c["event_id"] for c in pagination_chunk.chunk] + ) + + now = self._clock.time_msec() + # Do not bundle aggregations when retrieving the original event because + # we want the content before relations are applied to it. + original_event = self._event_serializer.serialize_event( + event, now, bundle_aggregations=None + ) + # The relations returned for the requested event do include their + # bundled aggregations. + aggregations = await self._main_store.get_bundled_aggregations( + events, requester.user.to_string() + ) + serialized_events = self._event_serializer.serialize_events( + events, now, bundle_aggregations=aggregations + ) + + return_value = await pagination_chunk.to_dict(self._main_store) + return_value["chunk"] = serialized_events + return_value["original_event"] = original_event + + return return_value diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index d9a6be43f7..c16078b187 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -51,9 +51,7 @@ class RelationPaginationServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -65,16 +63,6 @@ class RelationPaginationServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, requester.user.to_string(), allow_departed_users=True - ) - - # This gets the original event and checks that a) the event exists and - # b) the user is allowed to view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - limit = parse_integer(request, "limit", default=5) direction = parse_string( request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] @@ -90,9 +78,9 @@ class RelationPaginationServlet(RestServlet): if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - pagination_chunk = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -102,30 +90,7 @@ class RelationPaginationServlet(RestServlet): to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in pagination_chunk.chunk] - ) - - now = self.clock.time_msec() - # Do not bundle aggregations when retrieving the original event because - # we want the content before relations are applied to it. - original_event = self._event_serializer.serialize_event( - event, now, bundle_aggregations=None - ) - # The relations returned for the requested event do include their - # bundled aggregations. 
- aggregations = await self.store.get_bundled_aggregations( - events, requester.user.to_string() - ) - serialized_events = self._event_serializer.serialize_events( - events, now, bundle_aggregations=aggregations - ) - - return_value = await pagination_chunk.to_dict(self.store) - return_value["chunk"] = serialized_events - return_value["original_event"] = original_event - - return 200, return_value + return 200, result class RelationAggregationPaginationServlet(RestServlet): @@ -245,9 +210,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -260,18 +223,6 @@ class RelationAggregationGroupPaginationServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, - requester.user.to_string(), - allow_departed_users=True, - ) - - # This checks that a) the event exists and b) the user is allowed to - # view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - if relation_type != RelationTypes.ANNOTATION: raise SynapseError(400, "Relation type must be 'annotation'") @@ -286,9 +237,9 @@ class RelationAggregationGroupPaginationServlet(RestServlet): if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - result = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -298,17 +249,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in result.chunk] - ) - - now = self.clock.time_msec() - serialized_events = self._event_serializer.serialize_events(events, now) - - return_value = await result.to_dict(self.store) - return_value["chunk"] = serialized_events - - return 200, return_value + return 200, result def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/server.py b/synapse/server.py index 2fcf18a7a6..380369db92 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -94,6 +94,7 @@ from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler +from synapse.handlers.relations import RelationsHandler from synapse.handlers.room import ( RoomContextHandler, RoomCreationHandler, @@ -719,6 +720,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_pagination_handler(self) -> PaginationHandler: return PaginationHandler(self) + @cache_in_self + def get_relations_handler(self) -> RelationsHandler: + return RelationsHandler(self) + @cache_in_self def get_room_context_handler(self) -> RoomContextHandler: return RoomContextHandler(self) -- cgit 1.5.1 From 61210567405b1ac7efaa23d5513cc0b443da0a3a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 16 Mar 2022 15:07:41 +0000 Subject: Handle cancellation in 
`DatabasePool.runInteraction()` (#12199) To handle cancellation, we ensure that `after_callback`s and `exception_callback`s are always run, since the transaction will complete on another thread regardless of cancellation. We also wait until everything is done before releasing the `CancelledError`, so that logging contexts won't get used after they have been finished. Signed-off-by: Sean Quah --- changelog.d/12199.misc | 1 + synapse/storage/database.py | 61 +++++++++++++++++++++++++----------------- tests/storage/test_database.py | 58 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 24 deletions(-) create mode 100644 changelog.d/12199.misc diff --git a/changelog.d/12199.misc b/changelog.d/12199.misc new file mode 100644 index 0000000000..16dec1d26d --- /dev/null +++ b/changelog.d/12199.misc @@ -0,0 +1 @@ +Handle cancellation in `DatabasePool.runInteraction()`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 99802228c9..9749f0c06e 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -41,6 +41,7 @@ from prometheus_client import Histogram from typing_extensions import Literal from twisted.enterprise import adbapi +from twisted.internet import defer from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig @@ -55,6 +56,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor +from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -732,34 +734,45 @@ class DatabasePool: Returns: The result of func """ - after_callbacks: List[_CallbackListEntry] = [] - exception_callbacks: List[_CallbackListEntry] = [] - if not current_context(): - logger.warning("Starting db txn '%s' from sentinel context", desc) + async def _runInteraction() -> R: + after_callbacks: List[_CallbackListEntry] = [] + exception_callbacks: List[_CallbackListEntry] = [] - try: - with opentracing.start_active_span(f"db.{desc}"): - result = await self.runWithConnection( - self.new_transaction, - desc, - after_callbacks, - exception_callbacks, - func, - *args, - db_autocommit=db_autocommit, - isolation_level=isolation_level, - **kwargs, - ) + if not current_context(): + logger.warning("Starting db txn '%s' from sentinel context", desc) - for after_callback, after_args, after_kwargs in after_callbacks: - after_callback(*after_args, **after_kwargs) - except Exception: - for after_callback, after_args, after_kwargs in exception_callbacks: - after_callback(*after_args, **after_kwargs) - raise + try: + with opentracing.start_active_span(f"db.{desc}"): + result = await self.runWithConnection( + self.new_transaction, + desc, + after_callbacks, + exception_callbacks, + func, + *args, + db_autocommit=db_autocommit, + isolation_level=isolation_level, + **kwargs, + ) - return cast(R, result) + for after_callback, after_args, after_kwargs in after_callbacks: + after_callback(*after_args, **after_kwargs) + + return cast(R, result) + except Exception: + for after_callback, after_args, after_kwargs in exception_callbacks: + after_callback(*after_args, **after_kwargs) + raise + + # To handle cancellation, we ensure that `after_callback`s and + # `exception_callback`s are always run, since the transaction will complete + # on another 
thread regardless of cancellation.
+        #
+        # We also wait until everything above is done before releasing the
+        # `CancelledError`, so that logging contexts won't get used after they have been
+        # finished.
+        return await delay_cancellation(defer.ensureDeferred(_runInteraction()))
 
     async def runWithConnection(
         self,
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index ae13bed086..a40fc20ef9 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -15,6 +15,8 @@
 from typing import Callable, Tuple
 from unittest.mock import Mock, call
 
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.server import HomeServer
@@ -124,3 +126,59 @@ class CallbacksTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(after_callback.call_count, 2)  # no additional calls
         exception_callback.assert_not_called()
+
+
+class CancellationTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.db_pool: DatabasePool = self.store.db_pool
+
+    def test_after_callback(self) -> None:
+        """Test that the after callback is still called when the interaction is cancelled."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_called_once_with(123, 456, extra=789)
+        exception_callback.assert_not_called()
+
+    def test_exception_callback(self) -> None:
+        """Test that the exception callback is still called when a cancelled transaction fails."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+            # Simulate a retryable failure on every attempt.
+            raise self.db_pool.engine.module.OperationalError()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_not_called()
+        exception_callback.assert_has_calls(
+            [
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+            ]
+        )
+        self.assertEqual(exception_callback.call_count, 6)  # no additional calls
-- 
cgit 1.5.1


From 96274565ff0dbb7d21b02b04fcef115330426707 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 16 Mar 2022 12:17:39 -0400
Subject: Fix bundling aggregations if unsigned is not a returned event field.
 (#12234)

An error occurred if a filter was supplied with `event_fields` which did not
include `unsigned`.

In that case, bundled aggregations are still added as the spec states it is
allowed for servers to add additional fields.
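
As a rough illustration of the failure mode (a minimal sketch with hypothetical
event data, not an excerpt from the serializer itself): once a filter's
`event_fields` has stripped `unsigned` from a serialized event, merging in the
bundled aggregations must re-create that key rather than assume it exists.

```python
# Hypothetical serialized event whose "unsigned" key was removed by an
# event_fields filter before aggregations are merged in.
serialized_event = {"event_id": "$abc", "content": {"body": "hi"}}
serialized_aggregations = {"m.annotation": {"chunk": []}}

# Before the fix: raises KeyError, surfacing as a 500 on /sync.
# serialized_event["unsigned"].setdefault("m.relations", {}).update(
#     serialized_aggregations
# )

# After the fix: re-create "unsigned" if the filter stripped it off.
serialized_event.setdefault("unsigned", {}).setdefault("m.relations", {}).update(
    serialized_aggregations
)
```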
--- changelog.d/12234.bugfix | 1 + synapse/events/utils.py | 9 ++++++--- tests/rest/client/test_relations.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12234.bugfix diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix new file mode 100644 index 0000000000..dbb77f36ff --- /dev/null +++ b/changelog.d/12234.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index b2a237c1e0..a0520068e0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -530,9 +530,12 @@ class EventClientSerializer: # Include the bundled aggregations in the event. if serialized_aggregations: - serialized_event["unsigned"].setdefault("m.relations", {}).update( - serialized_aggregations - ) + # There is likely already an "unsigned" field, but a filter might + # have stripped it off (via the event_fields option). The server is + # allowed to return additional fields, so add it back. + serialized_event.setdefault("unsigned", {}).setdefault( + "m.relations", {} + ).update(serialized_aggregations) def serialize_events( self, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 0cbe6c0cf7..171f4e97c8 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1267,6 +1267,34 @@ class RelationsTestCase(BaseRelationsTestCase): [annotation_event_id_good, thread_event_id], ) + def test_bundled_aggregations_with_filter(self) -> None: + """ + If "unsigned" is an omitted field (due to filtering), adding the bundled + aggregations should not break. + + Note that the spec allows for a server to return additional fields beyond + what is specified. + """ + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + + # Note that the sync filter does not include "unsigned" as a field. + filter = urllib.parse.quote_plus( + b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Ensure the timeline is limited, find the parent event. + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + parent_event = self._find_event_in_chunk(room_timeline["events"]) + + # Ensure there's bundled aggregations on it. + self.assertIn("unsigned", parent_event) + self.assertIn("m.relations", parent_event["unsigned"]) + class RelationRedactionTestCase(BaseRelationsTestCase): """ -- cgit 1.5.1 From f70afbd565f34cdc093e083b92376a1154b007a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 12:20:05 -0400 Subject: Re-generate changelog. --- CHANGES.md | 1 + changelog.d/12234.bugfix | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/12234.bugfix diff --git a/CHANGES.md b/CHANGES.md index 60e7ecb1b9..78498c10b5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,7 @@ Bugfixes - Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. 
([\#12175](https://github.com/matrix-org/synapse/issues/12175)) - Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234)) Improved Documentation diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix deleted file mode 100644 index dbb77f36ff..0000000000 --- a/changelog.d/12234.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`. -- cgit 1.5.1 From 9e06e220649cc0139749c388a894bee0d65d5f4e Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 17 Mar 2022 12:25:50 +0100 Subject: Add type hints to more tests files. (#12240) --- changelog.d/12240.misc | 1 + mypy.ini | 4 ---- tests/handlers/test_cas.py | 19 ++++++++++++------- tests/handlers/test_federation.py | 36 ++++++++++++++++++++--------------- tests/handlers/test_presence.py | 13 +++++++++---- tests/push/test_http.py | 40 ++++++++++++++++++++++----------------- 6 files changed, 66 insertions(+), 47 deletions(-) create mode 100644 changelog.d/12240.misc diff --git a/changelog.d/12240.misc b/changelog.d/12240.misc new file mode 100644 index 0000000000..c5b6356799 --- /dev/null +++ b/changelog.d/12240.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index fe31bfb8bb..51f47ff5be 100644 --- a/mypy.ini +++ b/mypy.ini @@ -66,9 +66,6 @@ exclude = (?x) |tests/federation/test_federation_server.py |tests/federation/transport/test_knocking.py |tests/federation/transport/test_server.py - |tests/handlers/test_cas.py - |tests/handlers/test_federation.py - |tests/handlers/test_presence.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py @@ -80,7 +77,6 @@ exclude = (?x) |tests/logging/test_terse_json.py |tests/module_api/test_api.py |tests/push/test_email.py - |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index a267228846..a54aa29cf1 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -11,9 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.handlers.cas import CasResponse +from synapse.server import HomeServer +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -24,7 +29,7 @@ SERVER_URL = "https://issuer/" class CasHandlerTestCase(HomeserverTestCase): - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL cas_config = { @@ -40,7 +45,7 @@ class CasHandlerTestCase(HomeserverTestCase): return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.handler = hs.get_cas_handler() @@ -51,7 +56,7 @@ class CasHandlerTestCase(HomeserverTestCase): return hs - def test_map_cas_user_to_user(self): + def test_map_cas_user_to_user(self) -> None: """Ensure that mapping the CAS user returned from a provider to an MXID works properly.""" # stub out the auth handler @@ -75,7 +80,7 @@ class CasHandlerTestCase(HomeserverTestCase): auth_provider_session_id=None, ) - def test_map_cas_user_to_existing_user(self): + def test_map_cas_user_to_existing_user(self) -> None: """Existing users can log in with CAS account.""" store = self.hs.get_datastores().main self.get_success( @@ -119,7 +124,7 @@ class CasHandlerTestCase(HomeserverTestCase): auth_provider_session_id=None, ) - def test_map_cas_user_to_invalid_localpart(self): + def test_map_cas_user_to_invalid_localpart(self) -> None: """CAS automaps invalid characters to base-64 encoding.""" # stub out the auth handler @@ -150,7 +155,7 @@ class CasHandlerTestCase(HomeserverTestCase): } } ) - def test_required_attributes(self): + def test_required_attributes(self) -> None: """The required attributes must be met from the CAS response.""" # stub out the auth handler @@ -166,7 +171,7 @@ class CasHandlerTestCase(HomeserverTestCase): auth_handler.complete_sso_login.assert_not_called() # The response doesn't have any department. - cas_response = CasResponse("test_user", {"userGroup": "staff"}) + cas_response = CasResponse("test_user", {"userGroup": ["staff"]}) request.reset_mock() self.get_success( self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index e8b4e39d1a..89078fc637 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import List +from typing import List, cast from unittest import TestCase +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError from synapse.api.room_versions import RoomVersions @@ -23,7 +25,9 @@ from synapse.federation.federation_base import event_from_pdu_json from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer from synapse.types import create_requester +from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest @@ -42,7 +46,7 @@ class FederationTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main @@ -50,7 +54,7 @@ class FederationTestCase(unittest.HomeserverTestCase): self._event_auth_handler = hs.get_event_auth_handler() return hs - def test_exchange_revoked_invite(self): + def test_exchange_revoked_invite(self) -> None: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") @@ -96,7 +100,7 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure) self.assertEqual(failure.msg, "You are not invited to this room.") - def test_rejected_message_event_state(self): + def test_rejected_message_event_state(self) -> None: """ Check that we store the state group correctly for rejected non-state events. @@ -126,7 +130,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "content": {}, "room_id": room_id, "sender": "@yetanotheruser:" + OTHER_SERVER, - "depth": join_event["depth"] + 1, + "depth": cast(int, join_event["depth"]) + 1, "prev_events": [join_event.event_id], "auth_events": [], "origin_server_ts": self.clock.time_msec(), @@ -149,7 +153,7 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(sg, sg2) - def test_rejected_state_event_state(self): + def test_rejected_state_event_state(self) -> None: """ Check that we store the state group correctly for rejected state events. @@ -180,7 +184,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "content": {}, "room_id": room_id, "sender": "@yetanotheruser:" + OTHER_SERVER, - "depth": join_event["depth"] + 1, + "depth": cast(int, join_event["depth"]) + 1, "prev_events": [join_event.event_id], "auth_events": [], "origin_server_ts": self.clock.time_msec(), @@ -203,7 +207,7 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(sg, sg2) - def test_backfill_with_many_backward_extremities(self): + def test_backfill_with_many_backward_extremities(self) -> None: """ Check that we can backfill with many backward extremities. 
The goal is to make sure that when we only use a portion @@ -262,7 +266,7 @@ class FederationTestCase(unittest.HomeserverTestCase): ) self.get_success(d) - def test_backfill_floating_outlier_membership_auth(self): + def test_backfill_floating_outlier_membership_auth(self) -> None: """ As the local homeserver, check that we can properly process a federated event from the OTHER_SERVER with auth_events that include a floating @@ -377,7 +381,7 @@ class FederationTestCase(unittest.HomeserverTestCase): for ae in auth_events ] - self.handler.federation_client.get_event_auth = get_event_auth + self.handler.federation_client.get_event_auth = get_event_auth # type: ignore[assignment] with LoggingContext("receive_pdu"): # Fake the OTHER_SERVER federating the message event over to our local homeserver @@ -397,7 +401,7 @@ class FederationTestCase(unittest.HomeserverTestCase): @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invite_by_user_ratelimit(self): + def test_invite_by_user_ratelimit(self) -> None: """Tests that invites from federation to a particular user are actually rate-limited. """ @@ -446,7 +450,9 @@ class FederationTestCase(unittest.HomeserverTestCase): exc=LimitExceededError, ) - def _build_and_send_join_event(self, other_server, other_user, room_id): + def _build_and_send_join_event( + self, other_server: str, other_user: str, room_id: str + ) -> EventBase: join_event = self.get_success( self.handler.on_make_join_request(other_server, room_id, other_user) ) @@ -469,7 +475,7 @@ class FederationTestCase(unittest.HomeserverTestCase): class EventFromPduTestCase(TestCase): - def test_valid_json(self): + def test_valid_json(self) -> None: """Valid JSON should be turned into an event.""" ev = event_from_pdu_json( { @@ -487,7 +493,7 @@ class EventFromPduTestCase(TestCase): self.assertIsInstance(ev, EventBase) - def test_invalid_numbers(self): + def test_invalid_numbers(self) -> None: """Invalid values for an integer should be rejected, all floats should be rejected.""" for value in [ -(2 ** 53), @@ -512,7 +518,7 @@ class EventFromPduTestCase(TestCase): RoomVersions.V6, ) - def test_invalid_nested(self): + def test_invalid_nested(self) -> None: """List and dictionaries are recursively searched.""" with self.assertRaises(SynapseError): event_from_pdu_json( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 6ddec9ecf1..b2ed9cbe37 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -331,11 +331,11 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): # Extract presence update user ID and state information into lists of tuples db_presence_states = [(ps[0], ps[1]) for _, ps in db_presence_states[0]] - presence_states = [(ps.user_id, ps.state) for ps in presence_states] + presence_states_compare = [(ps.user_id, ps.state) for ps in presence_states] # Compare what we put into the storage with what we got out. # They should be identical. 
- self.assertEqual(presence_states, db_presence_states) + self.assertEqual(presence_states_compare, db_presence_states) class PresenceTimeoutTestCase(unittest.TestCase): @@ -357,6 +357,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.UNAVAILABLE) self.assertEqual(new_state.status_msg, status_msg) @@ -380,6 +381,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.BUSY) self.assertEqual(new_state.status_msg, status_msg) @@ -399,6 +401,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.OFFLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -420,6 +423,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.ONLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -477,6 +481,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.OFFLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -653,13 +658,13 @@ class PresenceHandlerTestCase(unittest.HomeserverTestCase): self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None) def _set_presencestate_with_status_msg( - self, user_id: str, state: PresenceState, status_msg: Optional[str] + self, user_id: str, state: str, status_msg: Optional[str] ): """Set a PresenceState and status_msg and check the result. Args: user_id: User for that the status is to be set. - PresenceState: The new PresenceState. + state: The new PresenceState. status_msg: Status message that is to be set. """ self.get_success( diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 6691e07128..ba158f5d93 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,15 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Tuple +from typing import Any, Dict, List, Optional, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable from synapse.push import PusherConfigException from synapse.rest.client import login, push_rule, receipts, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests.unittest import HomeserverTestCase, override_config @@ -35,13 +39,13 @@ class HTTPPusherTests(HomeserverTestCase): user_id = True hijack_auth = False - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["start_pushers"] = True return config - def make_homeserver(self, reactor, clock): - self.push_attempts: List[tuple[Deferred, str, dict]] = [] + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.push_attempts: List[Tuple[Deferred, str, dict]] = [] m = Mock() @@ -56,7 +60,7 @@ class HTTPPusherTests(HomeserverTestCase): return hs - def test_invalid_configuration(self): + def test_invalid_configuration(self) -> None: """Invalid push configurations should be rejected.""" # Register the user who gets notified user_id = self.register_user("user", "pass") @@ -68,7 +72,7 @@ class HTTPPusherTests(HomeserverTestCase): ) token_id = user_tuple.token_id - def test_data(data): + def test_data(data: Optional[JsonDict]) -> None: self.get_failure( self.hs.get_pusherpool().add_pusher( user_id=user_id, @@ -95,7 +99,7 @@ class HTTPPusherTests(HomeserverTestCase): # A url with an incorrect path isn't accepted. test_data({"url": "http://example.com/foo"}) - def test_sends_http(self): + def test_sends_http(self) -> None: """ The HTTP pusher will send pushes for each message to a HTTP endpoint when configured to do so. @@ -200,7 +204,7 @@ class HTTPPusherTests(HomeserverTestCase): self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) - def test_sends_high_priority_for_encrypted(self): + def test_sends_high_priority_for_encrypted(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to an encrypted message. @@ -321,7 +325,7 @@ class HTTPPusherTests(HomeserverTestCase): ) self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high") - def test_sends_high_priority_for_one_to_one_only(self): + def test_sends_high_priority_for_one_to_one_only(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message in a one-to-one room. @@ -404,7 +408,7 @@ class HTTPPusherTests(HomeserverTestCase): # check that this is low-priority self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") - def test_sends_high_priority_for_mention(self): + def test_sends_high_priority_for_mention(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message containing the user's display name. @@ -480,7 +484,7 @@ class HTTPPusherTests(HomeserverTestCase): # check that this is low-priority self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") - def test_sends_high_priority_for_atroom(self): + def test_sends_high_priority_for_atroom(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message that contains @room. 
@@ -563,7 +567,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
 
-    def test_push_unread_count_group_by_room(self):
+    def test_push_unread_count_group_by_room(self) -> None:
         """
         The HTTP pusher will group unread count by number of unread rooms.
         """
@@ -576,7 +580,7 @@ class HTTPPusherTests(HomeserverTestCase):
         self._check_push_attempt(6, 1)
 
     @override_config({"push": {"group_unread_count_by_room": False}})
-    def test_push_unread_count_message_count(self):
+    def test_push_unread_count_message_count(self) -> None:
         """
         The HTTP pusher will send the total unread message count.
         """
@@ -589,7 +593,7 @@ class HTTPPusherTests(HomeserverTestCase):
         # last read receipt
         self._check_push_attempt(6, 3)
 
-    def _test_push_unread_count(self):
+    def _test_push_unread_count(self) -> None:
         """
         Tests that the correct unread count appears in sent push
         notifications
@@ -681,7 +685,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         self.helper.send(room_id, body="HELLO???", tok=other_access_token)
 
-    def _advance_time_and_make_push_succeed(self, expected_push_attempts):
+    def _advance_time_and_make_push_succeed(self, expected_push_attempts: int) -> None:
         self.pump()
         self.push_attempts[expected_push_attempts - 1][0].callback({})
@@ -708,7 +712,9 @@ class HTTPPusherTests(HomeserverTestCase):
             expected_unread_count_last_push,
         )
 
-    def _send_read_request(self, access_token, message_event_id, room_id):
+    def _send_read_request(
+        self, access_token: str, message_event_id: str, room_id: str
+    ) -> None:
         # Now set the user's read receipt position to the first event
         #
         # This will actually trigger a new notification to be sent out so that
@@ -748,7 +754,7 @@ class HTTPPusherTests(HomeserverTestCase):
 
         return user_id, access_token
 
-    def test_dont_notify_rule_overrides_message(self):
+    def test_dont_notify_rule_overrides_message(self) -> None:
         """
         The override push rule will suppress notification
         """
-- cgit 1.5.1 

From 12d1f82db213603972d60be3f46f6a36c3c2330f Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 17 Mar 2022 13:46:05 +0000
Subject: Generate announcement links in release script (#12242)

---
 changelog.d/12242.misc |  1 +
 scripts-dev/release.py | 41 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/12242.misc

diff --git a/changelog.d/12242.misc b/changelog.d/12242.misc
new file mode 100644
index 0000000000..38e7e0f7d1
--- /dev/null
+++ b/changelog.d/12242.misc
@@ -0,0 +1 @@
+Generate announcement links in the release script.

diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 046453e65f..685fa32b03 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -66,11 +66,15 @@ def cli():
 
     ./scripts-dev/release.py tag
 
-    # ... wait for asssets to build ...
+    # ... wait for assets to build ...
 
     ./scripts-dev/release.py publish
 
     ./scripts-dev/release.py upload
 
+    # Optional: generate some nice links for the announcement
+
+    ./scripts-dev/release.py announce
+
     If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
     `tag`/`publish` command, then a new draft release will be created/published.
     """
@@ -415,6 +419,41 @@ def upload():
     )
 
 
+@cli.command()
+def announce():
+    """Generate markdown to announce the release."""
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    click.echo(
+        f"""
+Hi everyone. Synapse {current_version} has just been released.
+ +[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\ +[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ +[debs](https://packages.matrix.org/debian/) | \ +[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)""" + ) + + if "rc" in tag_name: + click.echo( + """ +Announce the RC in +- #homeowners:matrix.org (Synapse Announcements) +- #synapse-dev:matrix.org""" + ) + else: + click.echo( + """ +Announce the release in +- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic +- #synapse:matrix.org (Synapse Admins), bumping the version in the topic +- #synapse-dev:matrix.org +- #synapse-package-maintainers:matrix.org""" + ) + + def parse_version_from_module() -> Tuple[ version.Version, redbaron.RedBaron, redbaron.Node ]: -- cgit 1.5.1 From 872dbb0181714e201be082c4e8bd9b727c73f177 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 18 Mar 2022 13:51:41 +0000 Subject: Correct `check_username_for_spam` annotations and docs (#12246) * Formally type the UserProfile in user searches * export UserProfile in synapse.module_api * Update docs Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/12246.doc | 1 + docs/modules/spam_checker_callbacks.md | 10 ++++++---- synapse/events/spamcheck.py | 7 +++---- synapse/handlers/user_directory.py | 4 ++-- synapse/module_api/__init__.py | 2 ++ synapse/rest/client/user_directory.py | 4 ++-- synapse/storage/databases/main/user_directory.py | 23 +++++++++++++++++++---- synapse/types.py | 11 +++++++++++ 8 files changed, 46 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12246.doc diff --git a/changelog.d/12246.doc b/changelog.d/12246.doc new file mode 100644 index 0000000000..e7fcc1b99c --- /dev/null +++ b/changelog.d/12246.doc @@ -0,0 +1 @@ +Correct `check_username_for_spam` annotations and docs. \ No newline at end of file diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 2b672b78f9..472d957180 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -172,7 +172,7 @@ any of the subsequent implementations of this callback. _First introduced in Synapse v1.37.0_ ```python -async def check_username_for_spam(user_profile: Dict[str, str]) -> bool +async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool ``` Called when computing search results in the user directory. The module must return a @@ -182,9 +182,11 @@ search results; otherwise return `False`. The profile is represented as a dictionary with the following keys: -* `user_id`: The Matrix ID for this user. -* `display_name`: The user's display name. -* `avatar_url`: The `mxc://` URL to the user's avatar. +* `user_id: str`. The Matrix ID for this user. +* `display_name: Optional[str]`. The user's display name, or `None` if this user + has not set a display name. +* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None` + if this user has not set an avatar. The module is given a copy of the original dictionary, so modifying it from within the module cannot modify a user's profile when included in user directory search results. 
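To make the documented contract concrete, here is a minimal sketch of a module using this callback. `UserProfile`, `ModuleApi` and `register_spam_checker_callbacks` are real Synapse module-API names (the first is exported by this very change); the module class and word list are invented for illustration:

```python
from typing import Optional

from synapse.module_api import ModuleApi, UserProfile

# Word list invented for illustration only.
BLOCKED_WORDS = {"spam", "bot"}


class ExampleSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam
        )

    async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
        # display_name and avatar_url may be None if the user never set them,
        # which is exactly what the tightened annotations above encode.
        display_name: Optional[str] = user_profile["display_name"]
        if display_name is None:
            return False
        # Returning True hides the user from directory search results.
        return any(word in display_name.lower() for word in BLOCKED_WORDS)
```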
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 60904a55f5..cd80fcf9d1 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -21,7 +21,6 @@ from typing import ( Awaitable, Callable, Collection, - Dict, List, Optional, Tuple, @@ -31,7 +30,7 @@ from typing import ( from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import RoomAlias +from synapse.types import RoomAlias, UserProfile from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -50,7 +49,7 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bo USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] -CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]] +CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ Optional[dict], @@ -383,7 +382,7 @@ class SpamChecker: return True - async def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool: + async def check_username_for_spam(self, user_profile: UserProfile) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. If the server considers a username spammy, then it will not be included in diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index d27ed2be6a..048fd4bb82 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -19,8 +19,8 @@ import synapse.metrics from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo -from synapse.types import JsonDict from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -78,7 +78,7 @@ class UserDirectoryHandler(StateDeltasHandler): async def search_users( self, user_id: str, search_term: str, limit: int - ) -> JsonDict: + ) -> SearchResult: """Searches for users in directory Returns: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d735c1d461..aa8256b36f 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -111,6 +111,7 @@ from synapse.types import ( StateMap, UserID, UserInfo, + UserProfile, create_requester, ) from synapse.util import Clock @@ -150,6 +151,7 @@ __all__ = [ "EventBase", "StateMap", "ProfileInfo", + "UserProfile", ] logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py index a47d9bd01d..116c982ce6 100644 --- a/synapse/rest/client/user_directory.py +++ b/synapse/rest/client/user_directory.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.types import JsonDict +from synapse.types import JsonMapping from ._base import client_patterns @@ -38,7 +38,7 @@ class 
UserDirectorySearchRestServlet(RestServlet): self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonMapping]: """Searches for users in directory Returns: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index e7fddd2426..55cc9178f0 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -26,6 +26,8 @@ from typing import ( cast, ) +from typing_extensions import TypedDict + from synapse.api.errors import StoreError if TYPE_CHECKING: @@ -40,7 +42,12 @@ from synapse.storage.database import ( from synapse.storage.databases.main.state import StateFilter from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, get_domain_from_id, get_localpart_from_id +from synapse.types import ( + JsonDict, + UserProfile, + get_domain_from_id, + get_localpart_from_id, +) from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -591,6 +598,11 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) +class SearchResult(TypedDict): + limited: bool + results: List[UserProfile] + + class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # How many records do we calculate before sending it to # add_users_who_share_private_rooms? @@ -777,7 +789,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): async def search_user_dir( self, user_id: str, search_term: str, limit: int - ) -> JsonDict: + ) -> SearchResult: """Searches for users in directory Returns: @@ -910,8 +922,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # This should be unreachable. raise Exception("Unrecognized database engine") - results = await self.db_pool.execute( - "search_user_dir", self.db_pool.cursor_to_dict, sql, *args + results = cast( + List[UserProfile], + await self.db_pool.execute( + "search_user_dir", self.db_pool.cursor_to_dict, sql, *args + ), ) limited = len(results) > limit diff --git a/synapse/types.py b/synapse/types.py index 53be3583a0..5ce2a5b0a5 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -34,6 +34,7 @@ from typing import ( import attr from frozendict import frozendict from signedjson.key import decode_verify_key_bytes +from typing_extensions import TypedDict from unpaddedbase64 import decode_base64 from zope.interface import Interface @@ -63,6 +64,10 @@ MutableStateMap = MutableMapping[StateKey, T] # JSON types. These could be made stronger, but will do for now. # A JSON-serialisable dict. JsonDict = Dict[str, Any] +# A JSON-serialisable mapping; roughly speaking an immutable JSONDict. +# Useful when you have a TypedDict which isn't going to be mutated and you don't want +# to cast to JsonDict everywhere. +JsonMapping = Mapping[str, Any] # A JSON-serialisable object. 
JsonSerializable = object @@ -791,3 +796,9 @@ class UserInfo: is_deactivated: bool is_guest: bool is_shadow_banned: bool + + +class UserProfile(TypedDict): + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] -- cgit 1.5.1 From c46065fa3d6ad000f5da6e196c769371e0e76ec5 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 18 Mar 2022 16:24:18 +0100 Subject: Add some type hints to datastore (#12248) * inherit `MonthlyActiveUsersStore` from `RegistrationWorkerStore` Co-authored-by: Patrick Cloke --- changelog.d/12248.misc | 1 + mypy.ini | 6 - synapse/storage/databases/main/group_server.py | 156 +++++++++++++-------- .../storage/databases/main/monthly_active_users.py | 38 ++--- 4 files changed, 117 insertions(+), 84 deletions(-) create mode 100644 changelog.d/12248.misc diff --git a/changelog.d/12248.misc b/changelog.d/12248.misc new file mode 100644 index 0000000000..2b1290d1e1 --- /dev/null +++ b/changelog.d/12248.misc @@ -0,0 +1 @@ +Add missing type hints for storage. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 51f47ff5be..d8b3b3f9e5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -42,9 +42,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/databases/main/devices.py |synapse/storage/databases/main/event_federation.py - |synapse/storage/databases/main/group_server.py - |synapse/storage/databases/main/metrics.py - |synapse/storage/databases/main/monthly_active_users.py |synapse/storage/databases/main/push_rule.py |synapse/storage/databases/main/receipts.py |synapse/storage/databases/main/roommember.py @@ -87,9 +84,6 @@ exclude = (?x) |tests/state/test_v2.py |tests/storage/test_background_update.py |tests/storage/test_base.py - |tests/storage/test_client_ips.py - |tests/storage/test_database.py - |tests/storage/test_event_federation.py |tests/storage/test_id_generators.py |tests/storage/test_roommember.py |tests/test_metrics.py diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index 3f6086050b..0aef121d83 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -13,13 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast from typing_extensions import TypedDict from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.types import JsonDict from synapse.util import json_encoder @@ -75,7 +79,7 @@ class GroupServerWorkerStore(SQLBaseStore): ) -> List[Dict[str, Any]]: # TODO: Pagination - keyvalues = {"group_id": group_id} + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -117,7 +121,7 @@ class GroupServerWorkerStore(SQLBaseStore): # TODO: Pagination - def _get_rooms_in_group_txn(txn): + def _get_rooms_in_group_txn(txn: LoggingTransaction) -> List[_RoomInGroup]: sql = """ SELECT room_id, is_public FROM group_rooms WHERE group_id = ? 
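One pattern recurs throughout the hunks below: annotate the inner transaction callback with `LoggingTransaction` and a return type, let `runInteraction` carry that type through, and `cast` the `Optional` result of `fetchone()` before unpacking. In isolation — with an invented store and table — the shape is roughly:

```python
from typing import Tuple, cast

from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction


class ExampleStore(SQLBaseStore):
    async def count_widgets(self, owner_id: str) -> int:
        # "widgets" is an invented table; only the typing pattern matters here.
        def _count_widgets_txn(txn: LoggingTransaction) -> int:
            txn.execute(
                "SELECT COUNT(*) FROM widgets WHERE owner_id = ?", (owner_id,)
            )
            # fetchone() is typed as returning Optional[...], so mypy needs a
            # cast (or an assert) before the tuple can be unpacked -- the same
            # reason the hunks below cast to Tuple[int].
            (count,) = cast(Tuple[int], txn.fetchone())
            return count

        return await self.db_pool.runInteraction(
            "count_widgets", _count_widgets_txn
        )
```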
@@ -176,8 +180,10 @@ class GroupServerWorkerStore(SQLBaseStore): * "order": int, the sort order of rooms in this category """ - def _get_rooms_for_summary_txn(txn): - keyvalues = {"group_id": group_id} + def _get_rooms_for_summary_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -241,7 +247,7 @@ class GroupServerWorkerStore(SQLBaseStore): "get_rooms_for_summary", _get_rooms_for_summary_txn ) - async def get_group_categories(self, group_id): + async def get_group_categories(self, group_id: str) -> JsonDict: rows = await self.db_pool.simple_select_list( table="group_room_categories", keyvalues={"group_id": group_id}, @@ -257,7 +263,7 @@ class GroupServerWorkerStore(SQLBaseStore): for row in rows } - async def get_group_category(self, group_id, category_id): + async def get_group_category(self, group_id: str, category_id: str) -> JsonDict: category = await self.db_pool.simple_select_one( table="group_room_categories", keyvalues={"group_id": group_id, "category_id": category_id}, @@ -269,7 +275,7 @@ class GroupServerWorkerStore(SQLBaseStore): return category - async def get_group_roles(self, group_id): + async def get_group_roles(self, group_id: str) -> JsonDict: rows = await self.db_pool.simple_select_list( table="group_roles", keyvalues={"group_id": group_id}, @@ -285,7 +291,7 @@ class GroupServerWorkerStore(SQLBaseStore): for row in rows } - async def get_group_role(self, group_id, role_id): + async def get_group_role(self, group_id: str, role_id: str) -> JsonDict: role = await self.db_pool.simple_select_one( table="group_roles", keyvalues={"group_id": group_id, "role_id": role_id}, @@ -311,15 +317,19 @@ class GroupServerWorkerStore(SQLBaseStore): desc="get_local_groups_for_room", ) - async def get_users_for_summary_by_role(self, group_id, include_private=False): + async def get_users_for_summary_by_role( + self, group_id: str, include_private: bool = False + ) -> Tuple[List[JsonDict], JsonDict]: """Get the users and roles that should be included in a summary request Returns: ([users], [roles]) """ - def _get_users_for_summary_txn(txn): - keyvalues = {"group_id": group_id} + def _get_users_for_summary_txn( + txn: LoggingTransaction, + ) -> Tuple[List[JsonDict], JsonDict]: + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -406,7 +416,9 @@ class GroupServerWorkerStore(SQLBaseStore): allow_none=True, ) - async def get_users_membership_info_in_group(self, group_id, user_id): + async def get_users_membership_info_in_group( + self, group_id: str, user_id: str + ) -> JsonDict: """Get a dict describing the membership of a user in a group. 
Example if joined: @@ -421,7 +433,7 @@ class GroupServerWorkerStore(SQLBaseStore): An empty dict if the user is not join/invite/etc """ - def _get_users_membership_in_group_txn(txn): + def _get_users_membership_in_group_txn(txn: LoggingTransaction) -> JsonDict: row = self.db_pool.simple_select_one_txn( txn, table="group_users", @@ -463,10 +475,14 @@ class GroupServerWorkerStore(SQLBaseStore): desc="get_publicised_groups_for_user", ) - async def get_attestations_need_renewals(self, valid_until_ms): + async def get_attestations_need_renewals( + self, valid_until_ms: int + ) -> List[Dict[str, Any]]: """Get all attestations that need to be renewed until givent time""" - def _get_attestations_need_renewals_txn(txn): + def _get_attestations_need_renewals_txn( + txn: LoggingTransaction, + ) -> List[Dict[str, Any]]: sql = """ SELECT group_id, user_id FROM group_attestations_renewals WHERE valid_until_ms <= ? @@ -478,7 +494,9 @@ class GroupServerWorkerStore(SQLBaseStore): "get_attestations_need_renewals", _get_attestations_need_renewals_txn ) - async def get_remote_attestation(self, group_id, user_id): + async def get_remote_attestation( + self, group_id: str, user_id: str + ) -> Optional[JsonDict]: """Get the attestation that proves the remote agrees that the user is in the group. """ @@ -504,8 +522,8 @@ class GroupServerWorkerStore(SQLBaseStore): desc="get_joined_groups", ) - async def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): + async def get_all_groups_for_user(self, user_id, now_token) -> List[JsonDict]: + def _get_all_groups_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]: sql = """ SELECT group_id, type, membership, u.content FROM local_group_updates AS u @@ -528,15 +546,16 @@ class GroupServerWorkerStore(SQLBaseStore): "get_all_groups_for_user", _get_all_groups_for_user_txn ) - async def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( + async def get_groups_changes_for_user( + self, user_id: str, from_token: int, to_token: int + ) -> List[JsonDict]: + has_changed = self._group_updates_stream_cache.has_entity_changed( # type: ignore[attr-defined] user_id, from_token ) if not has_changed: return [] - def _get_groups_changes_for_user_txn(txn): + def _get_groups_changes_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]: sql = """ SELECT group_id, membership, type, u.content FROM local_group_updates AS u @@ -583,12 +602,14 @@ class GroupServerWorkerStore(SQLBaseStore): """ last_id = int(last_id) - has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id) + has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id) # type: ignore[attr-defined] if not has_changed: return [], current_id, False - def _get_all_groups_changes_txn(txn): + def _get_all_groups_changes_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: sql = """ SELECT stream_id, group_id, user_id, type, content FROM local_group_updates @@ -596,10 +617,13 @@ class GroupServerWorkerStore(SQLBaseStore): LIMIT ? 
""" txn.execute(sql, (last_id, current_id, limit)) - updates = [ - (stream_id, (group_id, user_id, gtype, db_to_json(content_json))) - for stream_id, group_id, user_id, gtype, content_json in txn - ] + updates = cast( + List[Tuple[int, tuple]], + [ + (stream_id, (group_id, user_id, gtype, db_to_json(content_json))) + for stream_id, group_id, user_id, gtype, content_json in txn + ], + ) limited = False upto_token = current_id @@ -633,8 +657,8 @@ class GroupServerStore(GroupServerWorkerStore): self, group_id: str, room_id: str, - category_id: str, - order: int, + category_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) room's entry in summary. @@ -661,11 +685,11 @@ class GroupServerStore(GroupServerWorkerStore): def _add_room_to_summary_txn( self, - txn, + txn: LoggingTransaction, group_id: str, room_id: str, - category_id: str, - order: int, + category_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) room's entry in summary. @@ -750,7 +774,7 @@ class GroupServerStore(GroupServerWorkerStore): WHERE group_id = ? AND category_id = ? """ txn.execute(sql, (group_id, category_id)) - (order,) = txn.fetchone() + (order,) = cast(Tuple[int], txn.fetchone()) if existing: to_update = {} @@ -766,7 +790,7 @@ class GroupServerStore(GroupServerWorkerStore): "category_id": category_id, "room_id": room_id, }, - values=to_update, + updatevalues=to_update, ) else: if is_public is None: @@ -785,7 +809,7 @@ class GroupServerStore(GroupServerWorkerStore): ) async def remove_room_from_summary( - self, group_id: str, room_id: str, category_id: str + self, group_id: str, room_id: str, category_id: Optional[str] ) -> int: if category_id is None: category_id = _DEFAULT_CATEGORY_ID @@ -808,8 +832,8 @@ class GroupServerStore(GroupServerWorkerStore): is_public: Optional[bool], ) -> None: """Add/update room category for group""" - insertion_values = {} - update_values = {"category_id": category_id} # This cannot be empty + insertion_values: JsonDict = {} + update_values: JsonDict = {"category_id": category_id} # This cannot be empty if profile is None: insertion_values["profile"] = "{}" @@ -844,8 +868,8 @@ class GroupServerStore(GroupServerWorkerStore): is_public: Optional[bool], ) -> None: """Add/remove user role""" - insertion_values = {} - update_values = {"role_id": role_id} # This cannot be empty + insertion_values: JsonDict = {} + update_values: JsonDict = {"role_id": role_id} # This cannot be empty if profile is None: insertion_values["profile"] = "{}" @@ -876,8 +900,8 @@ class GroupServerStore(GroupServerWorkerStore): self, group_id: str, user_id: str, - role_id: str, - order: int, + role_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) user's entry in summary. @@ -904,13 +928,13 @@ class GroupServerStore(GroupServerWorkerStore): def _add_user_to_summary_txn( self, - txn, + txn: LoggingTransaction, group_id: str, user_id: str, - role_id: str, - order: int, + role_id: Optional[str], + order: Optional[int], is_public: Optional[bool], - ): + ) -> None: """Add (or update) user's entry in summary. Args: @@ -989,7 +1013,7 @@ class GroupServerStore(GroupServerWorkerStore): WHERE group_id = ? AND role_id = ? 
""" txn.execute(sql, (group_id, role_id)) - (order,) = txn.fetchone() + (order,) = cast(Tuple[int], txn.fetchone()) if existing: to_update = {} @@ -1005,7 +1029,7 @@ class GroupServerStore(GroupServerWorkerStore): "role_id": role_id, "user_id": user_id, }, - values=to_update, + updatevalues=to_update, ) else: if is_public is None: @@ -1024,7 +1048,7 @@ class GroupServerStore(GroupServerWorkerStore): ) async def remove_user_from_summary( - self, group_id: str, user_id: str, role_id: str + self, group_id: str, user_id: str, role_id: Optional[str] ) -> int: if role_id is None: role_id = _DEFAULT_ROLE_ID @@ -1065,7 +1089,7 @@ class GroupServerStore(GroupServerWorkerStore): Optional if the user and group are on the same server """ - def _add_user_to_group_txn(txn): + def _add_user_to_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_insert_txn( txn, table="group_users", @@ -1108,7 +1132,7 @@ class GroupServerStore(GroupServerWorkerStore): await self.db_pool.runInteraction("add_user_to_group", _add_user_to_group_txn) async def remove_user_from_group(self, group_id: str, user_id: str) -> None: - def _remove_user_from_group_txn(txn): + def _remove_user_from_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_delete_txn( txn, table="group_users", @@ -1159,7 +1183,7 @@ class GroupServerStore(GroupServerWorkerStore): ) async def remove_room_from_group(self, group_id: str, room_id: str) -> None: - def _remove_room_from_group_txn(txn): + def _remove_room_from_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_delete_txn( txn, table="group_rooms", @@ -1216,7 +1240,9 @@ class GroupServerStore(GroupServerWorkerStore): content = content or {} - def _register_user_group_membership_txn(txn, next_id): + def _register_user_group_membership_txn( + txn: LoggingTransaction, next_id: int + ) -> int: # TODO: Upsert? self.db_pool.simple_delete_txn( txn, @@ -1249,7 +1275,7 @@ class GroupServerStore(GroupServerWorkerStore): ), }, ) - self._group_updates_stream_cache.entity_has_changed(user_id, next_id) + self._group_updates_stream_cache.entity_has_changed(user_id, next_id) # type: ignore[attr-defined] # TODO: Insert profile to ensure it comes down stream if its a join. 
@@ -1289,7 +1315,7 @@ class GroupServerStore(GroupServerWorkerStore): return next_id - async with self._group_updates_id_gen.get_next() as next_id: + async with self._group_updates_id_gen.get_next() as next_id: # type: ignore[attr-defined] res = await self.db_pool.runInteraction( "register_user_group_membership", _register_user_group_membership_txn, @@ -1298,7 +1324,13 @@ class GroupServerStore(GroupServerWorkerStore): return res async def create_group( - self, group_id, user_id, name, avatar_url, short_description, long_description + self, + group_id: str, + user_id: str, + name: str, + avatar_url: str, + short_description: str, + long_description: str, ) -> None: await self.db_pool.simple_insert( table="groups", @@ -1313,7 +1345,7 @@ class GroupServerStore(GroupServerWorkerStore): desc="create_group", ) - async def update_group_profile(self, group_id, profile): + async def update_group_profile(self, group_id: str, profile: JsonDict) -> None: await self.db_pool.simple_update_one( table="groups", keyvalues={"group_id": group_id}, @@ -1361,8 +1393,8 @@ class GroupServerStore(GroupServerWorkerStore): desc="remove_attestation_renewal", ) - def get_group_stream_token(self): - return self._group_updates_id_gen.get_current_token() + def get_group_stream_token(self) -> int: + return self._group_updates_id_gen.get_current_token() # type: ignore[attr-defined] async def delete_group(self, group_id: str) -> None: """Deletes a group fully from the database. @@ -1371,7 +1403,7 @@ class GroupServerStore(GroupServerWorkerStore): group_id: The group ID to delete. """ - def _delete_group_txn(txn): + def _delete_group_txn(txn: LoggingTransaction) -> None: tables = [ "groups", "group_users", diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e9a0cdc6be..216622964a 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, + LoggingTransaction, make_in_list_sql_clause, ) +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.util.caches.descriptors import cached from synapse.util.threepids import canonicalise_email @@ -56,7 +58,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): Number of current monthly active users """ - def _count_users(txn): + def _count_users(txn: LoggingTransaction) -> int: # Exclude app service users sql = """ SELECT COUNT(*) @@ -66,7 +68,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): WHERE (users.appservice_id IS NULL OR users.appservice_id = ''); """ txn.execute(sql) - (count,) = txn.fetchone() + (count,) = cast(Tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_users", _count_users) @@ -84,7 +86,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): """ - def _count_users_by_service(txn): + def _count_users_by_service(txn: LoggingTransaction) -> Dict[str, int]: sql = """ SELECT COALESCE(appservice_id, 'native'), COUNT(*) FROM monthly_active_users @@ -93,7 +95,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): """ txn.execute(sql) - result = txn.fetchall() + result = cast(List[Tuple[str, int]], txn.fetchall()) return dict(result) return await self.db_pool.runInteraction( @@ -141,12 +143,12 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): ) @wrap_as_background_process("reap_monthly_active_users") - async def reap_monthly_active_users(self): + async def reap_monthly_active_users(self) -> None: """Cleans out monthly active user table to ensure that no stale entries exist. """ - def _reap_users(txn, reserved_users): + def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None: """ Args: reserved_users (tuple): reserved users to preserve @@ -210,10 +212,10 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): # is racy. # Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant - self._invalidate_all_cache_and_stream( + self._invalidate_all_cache_and_stream( # type: ignore[attr-defined] txn, self.user_last_seen_monthly_active ) - self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) + self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) # type: ignore[attr-defined] reserved_users = await self.get_registered_reserved_users() await self.db_pool.runInteraction( @@ -221,7 +223,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): ) -class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerStore): def __init__( self, database: DatabasePool, @@ -242,13 +244,15 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], ) - def _initialise_reserved_users(self, txn, threepids): + def _initialise_reserved_users( + self, txn: LoggingTransaction, threepids: List[dict] + ) -> None: """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. 
Args: - txn (cursor): - threepids (list[dict]): List of threepid dicts to reserve + txn: + threepids: List of threepid dicts to reserve """ # XXX what is this function trying to achieve? It upserts into @@ -299,7 +303,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - def upsert_monthly_active_user_txn(self, txn, user_id): + def upsert_monthly_active_user_txn( + self, txn: LoggingTransaction, user_id: str + ) -> None: """Updates or inserts monthly active user member We consciously do not call is_support_txn from this method because it @@ -336,7 +342,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): txn, self.user_last_seen_monthly_active, (user_id,) ) - async def populate_monthly_active_users(self, user_id): + async def populate_monthly_active_users(self, user_id: str) -> None: """Checks on the state of monthly active user limits and optionally add the user to the monthly active tables @@ -345,7 +351,7 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): """ if self._limit_usage_by_mau or self._mau_stats_only: # Trial users and guests should not be included as part of MAU group - is_guest = await self.is_guest(user_id) + is_guest = await self.is_guest(user_id) # type: ignore[attr-defined] if is_guest: return is_trial = await self.is_trial_user(user_id) -- cgit 1.5.1 From 2177e356bc4b62e7a84c6fbee1d48de1abc940b5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Mar 2022 12:51:27 -0400 Subject: Sync more worker regexes in the documentation. (#12243) --- changelog.d/12243.doc | 1 + docs/workers.md | 30 ++++++++++++++---------------- 2 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12243.doc diff --git a/changelog.d/12243.doc b/changelog.d/12243.doc new file mode 100644 index 0000000000..b2031f0a40 --- /dev/null +++ b/changelog.d/12243.doc @@ -0,0 +1 @@ +Remove incorrect prefixes in the worker documentation for some endpoints. diff --git a/docs/workers.md b/docs/workers.md index 8751134e65..9eb4194e4d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -185,8 +185,8 @@ worker: refer to the [stream writers](#stream-writers) section below for further information. # Sync requests - ^/_matrix/client/(v2_alpha|r0|v3)/sync$ - ^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$ + ^/_matrix/client/(r0|v3)/sync$ + ^/_matrix/client/(api/v1|r0|v3)/events$ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ @@ -200,13 +200,9 @@ information. ^/_matrix/federation/v1/query/ ^/_matrix/federation/v1/make_join/ ^/_matrix/federation/v1/make_leave/ - ^/_matrix/federation/v1/send_join/ - ^/_matrix/federation/v2/send_join/ - ^/_matrix/federation/v1/send_leave/ - ^/_matrix/federation/v2/send_leave/ - ^/_matrix/federation/v1/invite/ - ^/_matrix/federation/v2/invite/ - ^/_matrix/federation/v1/query_auth/ + ^/_matrix/federation/(v1|v2)/send_join/ + ^/_matrix/federation/(v1|v2)/send_leave/ + ^/_matrix/federation/(v1|v2)/invite/ ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ ^/_matrix/federation/v1/user/devices/ @@ -274,6 +270,8 @@ information. Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/federation/v1/groups/ + ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ + ^/_matrix/client/(r0|v3|unstable)/groups/ Pagination requests can also be handled, but all requests for a given room must be routed to the same instance. 
Additionally, care must be taken to @@ -397,23 +395,23 @@ the stream writer for the `typing` stream: The following endpoints should be routed directly to the worker configured as the stream writer for the `to_device` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ + ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ ##### The `account_data` stream The following endpoints should be routed directly to the worker configured as the stream writer for the `account_data` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags - ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data + ^/_matrix/client/(r0|v3|unstable)/.*/tags + ^/_matrix/client/(r0|v3|unstable)/.*/account_data ##### The `receipts` stream The following endpoints should be routed directly to the worker configured as the stream writer for the `receipts` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers ##### The `presence` stream @@ -528,7 +526,7 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for Handles searches in the user directory. It can handle REST endpoints matching the following regular expressions: - ^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$ + ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ When using this worker you must also set `update_user_directory: False` in the shared configuration file to stop the main synapse running background @@ -540,7 +538,7 @@ Proxies some frequently-requested client endpoints to add caching and remove load from the main synapse. It can handle REST endpoints matching the following regular expressions: - ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload + ^/_matrix/client/(r0|v3|unstable)/keys/upload If `use_presence` is False in the homeserver config, it can also handle REST endpoints matching the following regular expressions: -- cgit 1.5.1 From 80e0e1f35e6b1cdfa0267f9c40a6f212b7d774de Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Mar 2022 13:15:45 -0400 Subject: Only fetch thread participation for events with threads. (#12228) We fetch the thread summary in two phases: 1. The summary that is shared by all users (count of messages and latest event). 2. Whether the requesting user has participated in the thread. There's no use in attempting step 2 for events which did not return a summary from step 1. --- changelog.d/12228.bugfix | 1 + synapse/storage/databases/main/relations.py | 4 +- tests/rest/client/test_relations.py | 509 +++++++++++++++------------- tests/server.py | 20 +- 4 files changed, 289 insertions(+), 245 deletions(-) create mode 100644 changelog.d/12228.bugfix diff --git a/changelog.d/12228.bugfix b/changelog.d/12228.bugfix new file mode 100644 index 0000000000..4755777139 --- /dev/null +++ b/changelog.d/12228.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index c4869d64e6..af2334a65e 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -857,7 +857,9 @@ class RelationsWorkerStore(SQLBaseStore): summaries = await self._get_thread_summaries(events_by_id.keys()) # Only fetch participated for a limited selection based on what had # summaries. 
- participated = await self._get_threads_participated(summaries.keys(), user_id) + participated = await self._get_threads_participated( + [event_id for event_id, summary in summaries.items() if summary], user_id + ) for event_id, summary in summaries.items(): if summary: thread_count, latest_thread_event, edit = summary diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index f3741b3001..329690f8f7 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -15,7 +15,7 @@ import itertools import urllib.parse -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Callable, Dict, List, Optional, Tuple from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -155,6 +155,16 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, channel.json_body) return channel.json_body["chunk"] + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. + """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + class RelationsTestCase(BaseRelationsTestCase): def test_send_relation(self) -> None: @@ -291,202 +301,6 @@ class RelationsTestCase(BaseRelationsTestCase): ) self.assertEqual(400, channel.code, channel.json_body) - @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_bundled_aggregations(self) -> None: - """ - Test that annotations, references, and threads get correctly bundled. - - Note that this doesn't test against /relations since only thread relations - get bundled via that API. See test_aggregation_get_event_for_thread. - - See test_edit for a similar test for edits. - """ - # Setup by sending a variety of relations. - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - reply_1 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - reply_2 = channel.json_body["event_id"] - - self._send_relation(RelationTypes.THREAD, "m.room.test") - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - thread_2 = channel.json_body["event_id"] - - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") - - # Ensure the fields are as expected. - self.assertCountEqual( - relations_dict.keys(), - ( - RelationTypes.ANNOTATION, - RelationTypes.REFERENCE, - RelationTypes.THREAD, - ), - ) - - # Check the values of each field. 
- self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - relations_dict[RelationTypes.ANNOTATION], - ) - - self.assertEqual( - {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - relations_dict[RelationTypes.REFERENCE], - ) - - self.assertEqual( - 2, - relations_dict[RelationTypes.THREAD].get("count"), - ) - self.assertTrue( - relations_dict[RelationTypes.THREAD].get("current_user_participated") - ) - # The latest thread event has some fields that don't matter. - self.assert_dict( - { - "content": { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": RelationTypes.THREAD, - } - }, - "event_id": thread_2, - "sender": self.user_id, - "type": "m.room.test", - }, - relations_dict[RelationTypes.THREAD].get("latest_event"), - ) - - # Request the event directly. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body) - - # Request the room messages. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/messages?dir=b", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - - # Request the room context. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) - - # Request sync. - channel = self.make_request("GET", "/sync", access_token=self.user_token) - self.assertEqual(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - self.assertTrue(room_timeline["limited"]) - assert_bundle(self._find_event_in_chunk(room_timeline["events"])) - - # Request search. - channel = self.make_request( - "POST", - "/search", - # Search term matches the parent message. - content={"search_categories": {"room_events": {"search_term": "Hi"}}}, - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - chunk = [ - result["result"] - for result in channel.json_body["search_categories"]["room_events"][ - "results" - ] - ] - assert_bundle(self._find_event_in_chunk(chunk)) - - def test_aggregation_get_event_for_annotation(self) -> None: - """Test that annotations do not get bundled aggregations included - when directly requested. - """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - annotation_id = channel.json_body["event_id"] - - # Annotate the annotation. - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id - ) - - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{annotation_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertIsNone(channel.json_body["unsigned"].get("m.relations")) - - def test_aggregation_get_event_for_thread(self) -> None: - """Test that threads get bundled aggregations included when directly requested.""" - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - thread_id = channel.json_body["event_id"] - - # Annotate the annotation. 
- self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id - ) - - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{thread_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["unsigned"].get("m.relations"), - { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, - }, - ) - - # It should also be included when the entire thread is requested. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) - - thread_message = channel.json_body["chunk"][0] - self.assertEqual( - thread_message["unsigned"].get("m.relations"), - { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, - }, - ) - def test_ignore_invalid_room(self) -> None: """Test that we ignore invalid relations over federation.""" # Create another room and send a message in it. @@ -796,7 +610,7 @@ class RelationsTestCase(BaseRelationsTestCase): threaded_event_id = channel.json_body["event_id"] new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, @@ -836,7 +650,7 @@ class RelationsTestCase(BaseRelationsTestCase): edit_event_id = channel.json_body["event_id"] # Edit the edit event. - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={ @@ -912,16 +726,6 @@ class RelationsTestCase(BaseRelationsTestCase): self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["chunk"], []) - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. - """ - for event in events: - if event["event_id"] == self.parent_id: - return event - - raise AssertionError(f"Event {self.parent_id} not found in chunk") - def test_background_update(self) -> None: """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") @@ -981,34 +785,6 @@ class RelationsTestCase(BaseRelationsTestCase): [annotation_event_id_good, thread_event_id], ) - def test_bundled_aggregations_with_filter(self) -> None: - """ - If "unsigned" is an omitted field (due to filtering), adding the bundled - aggregations should not break. - - Note that the spec allows for a server to return additional fields beyond - what is specified. - """ - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - - # Note that the sync filter does not include "unsigned" as a field. - filter = urllib.parse.quote_plus( - b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' - ) - channel = self.make_request( - "GET", f"/sync?filter={filter}", access_token=self.user_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Ensure the timeline is limited, find the parent event. 
- room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - self.assertTrue(room_timeline["limited"]) - parent_event = self._find_event_in_chunk(room_timeline["events"]) - - # Ensure there's bundled aggregations on it. - self.assertIn("unsigned", parent_event) - self.assertIn("m.relations", parent_event["unsigned"]) - class RelationPaginationTestCase(BaseRelationsTestCase): def test_basic_paginate_relations(self) -> None: @@ -1255,7 +1031,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase): idx += 1 # Also send a different type of reaction so that we test we don't see it - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") prev_token = "" found_event_ids: List[str] = [] @@ -1291,6 +1067,263 @@ class RelationPaginationTestCase(BaseRelationsTestCase): self.assertEqual(found_event_ids, expected_event_ids) +class BundledAggregationsTestCase(BaseRelationsTestCase): + """ + See RelationsTestCase.test_edit for a similar test for edits. + + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + """ + + def _test_bundled_aggregations( + self, + relation_type: str, + assertion_callable: Callable[[JsonDict], None], + expected_db_txn_for_event: int, + ) -> None: + """ + Makes requests to various endpoints which should include bundled aggregations + and then calls an assertion function on the bundled aggregations. + + Args: + relation_type: The field to search for in the `m.relations` field in unsigned. + assertion_callable: Called with the contents of unsigned["m.relations"][relation_type] + for relation-specific assertions. + expected_db_txn_for_event: The number of database transactions which + are expected for a call to /event/. + """ + + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") + + # Ensure the fields are as expected. + self.assertCountEqual(relations_dict.keys(), (relation_type,)) + assertion_callable(relations_dict[relation_type]) + + # Request the event directly. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body) + assert channel.resource_usage is not None + self.assertEqual(channel.resource_usage.db_txn_count, expected_db_txn_for_event) + + # Request the room messages. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) + + # Request sync. 
+ filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 4}}}') + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + assert_bundle(self._find_event_in_chunk(room_timeline["events"])) + + # Request search. + channel = self.make_request( + "POST", + "/search", + # Search term matches the parent message. + content={"search_categories": {"room_events": {"search_term": "Hi"}}}, + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + chunk = [ + result["result"] + for result in channel.json_body["search_categories"]["room_events"][ + "results" + ] + ] + assert_bundle(self._find_event_in_chunk(chunk)) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_annotation(self) -> None: + """ + Test that annotations get correctly bundled. + """ + # Setup by sending a variety of relations. + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual( + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + bundled_aggregations, + ) + + self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_reference(self) -> None: + """ + Test that references get correctly bundled. + """ + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_1 = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_2 = channel.json_body["event_id"] + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual( + {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, + bundled_aggregations, + ) + + self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_thread(self) -> None: + """ + Test that threads get correctly bundled. + """ + self._send_relation(RelationTypes.THREAD, "m.room.test") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual(2, bundled_aggregations.get("count")) + self.assertTrue(bundled_aggregations.get("current_user_participated")) + # The latest thread event has some fields that don't matter. + self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + bundled_aggregations.get("latest_event"), + ) + + self._test_bundled_aggregations(RelationTypes.THREAD, assert_annotations, 9) + + def test_aggregation_get_event_for_annotation(self) -> None: + """Test that annotations do not get bundled aggregations included + when directly requested. 
+        """
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        annotation_id = channel.json_body["event_id"]
+
+        # Annotate the annotation.
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{annotation_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertIsNone(channel.json_body["unsigned"].get("m.relations"))
+
+    def test_aggregation_get_event_for_thread(self) -> None:
+        """Test that threads get bundled aggregations included when directly requested."""
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_id = channel.json_body["event_id"]
+
+        # Annotate the thread reply.
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{thread_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(
+            channel.json_body["unsigned"].get("m.relations"),
+            {
+                RelationTypes.ANNOTATION: {
+                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
+                },
+            },
+        )
+
+        # It should also be included when the entire thread is requested.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+
+        thread_message = channel.json_body["chunk"][0]
+        self.assertEqual(
+            thread_message["unsigned"].get("m.relations"),
+            {
+                RelationTypes.ANNOTATION: {
+                    "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
+                },
+            },
+        )
+
+    def test_bundled_aggregations_with_filter(self) -> None:
+        """
+        If "unsigned" is an omitted field (due to filtering), adding the bundled
+        aggregations should not break.
+
+        Note that the spec allows for a server to return additional fields beyond
+        what is specified.
+        """
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+
+        # Note that the sync filter does not include "unsigned" as a field.
+        filter = urllib.parse.quote_plus(
+            b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}'
+        )
+        channel = self.make_request(
+            "GET", f"/sync?filter={filter}", access_token=self.user_token
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # Ensure the timeline is limited, then find the parent event.
+        room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
+        self.assertTrue(room_timeline["limited"])
+        parent_event = self._find_event_in_chunk(room_timeline["events"])
+
+        # Ensure there are bundled aggregations on it.
+        self.assertIn("unsigned", parent_event)
+        self.assertIn("m.relations", parent_event["unsigned"])
+
+
 class RelationRedactionTestCase(BaseRelationsTestCase):
     """
     Test the behaviour of relations when the parent or child event is redacted.
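For orientation, the tests above assert that a parent event carries its bundled
aggregations under unsigned -> m.relations. A minimal illustrative sketch of
that shape (hypothetical event IDs; not part of this patch):

    # Illustrative only: approximate shape of a parent event with bundled
    # aggregations, matching the assertions in the annotation tests above.
    parent_event = {
        "event_id": "$parent:example.com",  # hypothetical ID
        "type": "m.room.message",
        "content": {"body": "Hi"},
        "unsigned": {
            "m.relations": {
                # RelationTypes.ANNOTATION == "m.annotation"
                "m.annotation": {
                    "chunk": [
                        {"type": "m.reaction", "key": "a", "count": 2},
                        {"type": "m.reaction", "key": "b", "count": 1},
                    ]
                },
            }
        },
    }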
diff --git a/tests/server.py b/tests/server.py index 82990c2eb9..6ce2a17bf4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -54,13 +54,18 @@ from twisted.internet.interfaces import ( ITransport, ) from twisted.python.failure import Failure -from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock +from twisted.test.proto_helpers import ( + AccumulatingProtocol, + MemoryReactor, + MemoryReactorClock, +) from twisted.web.http_headers import Headers from twisted.web.resource import IResource from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig from synapse.http.site import SynapseRequest +from synapse.logging.context import ContextResourceUsage from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import PostgresEngine, create_engine @@ -88,18 +93,19 @@ class TimedOutException(Exception): """ -@attr.s +@attr.s(auto_attribs=True) class FakeChannel: """ A fake Twisted Web Channel (the part that interfaces with the wire). """ - site = attr.ib(type=Union[Site, "FakeSite"]) - _reactor = attr.ib() - result = attr.ib(type=dict, default=attr.Factory(dict)) - _ip = attr.ib(type=str, default="127.0.0.1") + site: Union[Site, "FakeSite"] + _reactor: MemoryReactor + result: dict = attr.Factory(dict) + _ip: str = "127.0.0.1" _producer: Optional[Union[IPullProducer, IPushProducer]] = None + resource_usage: Optional[ContextResourceUsage] = None @property def json_body(self): @@ -168,6 +174,8 @@ class FakeChannel: def requestDone(self, _self): self.result["done"] = True + if isinstance(_self, SynapseRequest): + self.resource_usage = _self.logcontext.get_resource_usage() def getPeer(self): # We give an address so that getClientIP returns a non null entry, -- cgit 1.5.1 From 8fe930c215f69913fbcd96d609ec6950644e4ec4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Mar 2022 13:49:32 -0400 Subject: Move get_bundled_aggregations to relations handler. (#12237) The get_bundled_aggregations code is fairly high-level and uses a lot of store methods, we move it into the handler as that seems like a better fit. --- changelog.d/12237.misc | 1 + synapse/events/utils.py | 2 +- synapse/handlers/pagination.py | 5 +- synapse/handlers/relations.py | 151 +++++++++++++++++++++++++++- synapse/handlers/room.py | 5 +- synapse/handlers/search.py | 3 +- synapse/handlers/sync.py | 9 +- synapse/rest/client/room.py | 3 +- synapse/storage/databases/main/relations.py | 151 +--------------------------- 9 files changed, 173 insertions(+), 157 deletions(-) create mode 100644 changelog.d/12237.misc diff --git a/changelog.d/12237.misc b/changelog.d/12237.misc new file mode 100644 index 0000000000..41c9dcbd37 --- /dev/null +++ b/changelog.d/12237.misc @@ -0,0 +1 @@ +Refactor the relations endpoints to add a `RelationsHandler`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a0520068e0..7120062127 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -38,8 +38,8 @@ from synapse.util.frozenutils import unfreeze from . import EventBase if TYPE_CHECKING: + from synapse.handlers.relations import BundledAggregations from synapse.server import HomeServer - from synapse.storage.databases.main.relations import BundledAggregations # Split strings on "." but not "\." 
This uses a negative lookbehind assertion for '\' diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 41679f7f86..876b879483 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -134,6 +134,7 @@ class PaginationHandler: self.clock = hs.get_clock() self._server_name = hs.hostname self._room_shutdown_handler = hs.get_room_shutdown_handler() + self._relations_handler = hs.get_relations_handler() self.pagination_lock = ReadWriteLock() # IDs of rooms in which there currently an active purge *or delete* operation. @@ -539,7 +540,9 @@ class PaginationHandler: state_dict = await self.store.get_events(list(state_ids.values())) state = state_dict.values() - aggregations = await self.store.get_bundled_aggregations(events, user_id) + aggregations = await self._relations_handler.get_bundled_aggregations( + events, user_id + ) time_now = self.clock.time_msec() diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 8e475475ad..57135d4519 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -12,18 +12,53 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Dict, Iterable, Optional, cast +import attr +from frozendict import frozendict + +from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError +from synapse.events import EventBase from synapse.types import JsonDict, Requester, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _ThreadAggregation: + # The latest event in the thread. + latest_event: EventBase + # The latest edit to the latest event in the thread. + latest_edit: Optional[EventBase] + # The total number of events in the thread. + count: int + # True if the current user has sent an event to the thread. + current_user_participated: bool + + +@attr.s(slots=True, auto_attribs=True) +class BundledAggregations: + """ + The bundled aggregations for an event. + + Some values require additional processing during serialization. + """ + + annotations: Optional[JsonDict] = None + references: Optional[JsonDict] = None + replace: Optional[EventBase] = None + thread: Optional[_ThreadAggregation] = None + + def __bool__(self) -> bool: + return bool(self.annotations or self.references or self.replace or self.thread) + + class RelationsHandler: def __init__(self, hs: "HomeServer"): self._main_store = hs.get_datastores().main @@ -103,7 +138,7 @@ class RelationsHandler: ) # The relations returned for the requested event do include their # bundled aggregations. - aggregations = await self._main_store.get_bundled_aggregations( + aggregations = await self.get_bundled_aggregations( events, requester.user.to_string() ) serialized_events = self._event_serializer.serialize_events( @@ -115,3 +150,115 @@ class RelationsHandler: return_value["original_event"] = original_event return return_value + + async def _get_bundled_aggregation_for_event( + self, event: EventBase, user_id: str + ) -> Optional[BundledAggregations]: + """Generate bundled aggregations for an event. + + Note that this does not use a cache, but depends on cached methods. + + Args: + event: The event to calculate bundled aggregations for. + user_id: The user requesting the bundled aggregations. 
+ + Returns: + The bundled aggregations for an event, if bundled aggregations are + enabled and the event can have bundled aggregations. + """ + + # Do not bundle aggregations for an event which represents an edit or an + # annotation. It does not make sense for them to have related events. + relates_to = event.content.get("m.relates_to") + if isinstance(relates_to, (dict, frozendict)): + relation_type = relates_to.get("rel_type") + if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): + return None + + event_id = event.event_id + room_id = event.room_id + + # The bundled aggregations to include, a mapping of relation type to a + # type-specific value. Some types include the direct return type here + # while others need more processing during serialization. + aggregations = BundledAggregations() + + annotations = await self._main_store.get_aggregation_groups_for_event( + event_id, room_id + ) + if annotations.chunk: + aggregations.annotations = await annotations.to_dict( + cast("DataStore", self) + ) + + references = await self._main_store.get_relations_for_event( + event_id, event, room_id, RelationTypes.REFERENCE, direction="f" + ) + if references.chunk: + aggregations.references = await references.to_dict(cast("DataStore", self)) + + # Store the bundled aggregations in the event metadata for later use. + return aggregations + + async def get_bundled_aggregations( + self, events: Iterable[EventBase], user_id: str + ) -> Dict[str, BundledAggregations]: + """Generate bundled aggregations for events. + + Args: + events: The iterable of events to calculate bundled aggregations for. + user_id: The user requesting the bundled aggregations. + + Returns: + A map of event ID to the bundled aggregation for the event. Not all + events may have bundled aggregations in the results. + """ + # De-duplicate events by ID to handle the same event requested multiple times. + # + # State events do not get bundled aggregations. + events_by_id = { + event.event_id: event for event in events if not event.is_state() + } + + # event ID -> bundled aggregation in non-serialized form. + results: Dict[str, BundledAggregations] = {} + + # Fetch other relations per event. + for event in events_by_id.values(): + event_result = await self._get_bundled_aggregation_for_event(event, user_id) + if event_result: + results[event.event_id] = event_result + + # Fetch any edits (but not for redacted events). + edits = await self._main_store.get_applicable_edits( + [ + event_id + for event_id, event in events_by_id.items() + if not event.internal_metadata.is_redacted() + ] + ) + for event_id, edit in edits.items(): + results.setdefault(event_id, BundledAggregations()).replace = edit + + # Fetch thread summaries. + summaries = await self._main_store.get_thread_summaries(events_by_id.keys()) + # Only fetch participated for a limited selection based on what had + # summaries. + participated = await self._main_store.get_threads_participated( + [event_id for event_id, summary in summaries.items() if summary], user_id + ) + for event_id, summary in summaries.items(): + if summary: + thread_count, latest_thread_event, edit = summary + results.setdefault( + event_id, BundledAggregations() + ).thread = _ThreadAggregation( + latest_event=latest_thread_event, + latest_edit=edit, + count=thread_count, + # If there's a thread summary it must also exist in the + # participated dictionary. 
+ current_user_participated=participated[event_id], + ) + + return results diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b9735631fc..092e185c99 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -60,8 +60,8 @@ from synapse.events import EventBase from synapse.events.utils import copy_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state +from synapse.handlers.relations import BundledAggregations from synapse.rest.admin._base import assert_user_is_admin -from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.state import StateFilter from synapse.streams import EventSource from synapse.types import ( @@ -1118,6 +1118,7 @@ class RoomContextHandler: self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state + self._relations_handler = hs.get_relations_handler() async def get_event_context( self, @@ -1190,7 +1191,7 @@ class RoomContextHandler: event = filtered[0] # Fetch the aggregations. - aggregations = await self.store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( itertools.chain(events_before, (event,), events_after), user.to_string(), ) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index aa16e417eb..30eddda65f 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -54,6 +54,7 @@ class SearchHandler: self.clock = hs.get_clock() self.hs = hs self._event_serializer = hs.get_event_client_serializer() + self._relations_handler = hs.get_relations_handler() self.storage = hs.get_storage() self.state_store = self.storage.state self.auth = hs.get_auth() @@ -354,7 +355,7 @@ class SearchHandler: aggregations = None if self._msc3666_enabled: - aggregations = await self.store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( # Generate an iterable of EventBase for all the events that will be # returned, including contextual events. itertools.chain( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c9d6a18bd7..6c569cfb1c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -33,11 +33,11 @@ from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase +from synapse.handlers.relations import BundledAggregations from synapse.logging.context import current_context from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import NotifCounts -from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -269,6 +269,7 @@ class SyncHandler: self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.presence_handler = hs.get_presence_handler() + self._relations_handler = hs.get_relations_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -638,8 +639,10 @@ class SyncHandler: # as clients will have all the necessary information. 
bundled_aggregations = None if limited or newly_joined_room: - bundled_aggregations = await self.store.get_bundled_aggregations( - recents, sync_config.user.to_string() + bundled_aggregations = ( + await self._relations_handler.get_bundled_aggregations( + recents, sync_config.user.to_string() + ) ) return TimelineBatch( diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 8a06ab8c5f..47e152c8cc 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -645,6 +645,7 @@ class RoomEventServlet(RestServlet): self._store = hs.get_datastores().main self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() + self._relations_handler = hs.get_relations_handler() self.auth = hs.get_auth() async def on_GET( @@ -663,7 +664,7 @@ class RoomEventServlet(RestServlet): if event: # Ensure there are bundled aggregations available. - aggregations = await self._store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( [event], requester.user.to_string() ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index af2334a65e..b2295fd51f 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -27,7 +27,6 @@ from typing import ( ) import attr -from frozendict import frozendict from synapse.api.constants import RelationTypes from synapse.events import EventBase @@ -41,45 +40,15 @@ from synapse.storage.database import ( from synapse.storage.databases.main.stream import generate_pagination_where_clause from synapse.storage.engines import PostgresEngine from synapse.storage.relations import AggregationPaginationToken, PaginationChunk -from synapse.types import JsonDict, RoomStreamToken, StreamToken +from synapse.types import RoomStreamToken, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _ThreadAggregation: - # The latest event in the thread. - latest_event: EventBase - # The latest edit to the latest event in the thread. - latest_edit: Optional[EventBase] - # The total number of events in the thread. - count: int - # True if the current user has sent an event to the thread. - current_user_participated: bool - - -@attr.s(slots=True, auto_attribs=True) -class BundledAggregations: - """ - The bundled aggregations for an event. - - Some values require additional processing during serialization. 
- """ - - annotations: Optional[JsonDict] = None - references: Optional[JsonDict] = None - replace: Optional[EventBase] = None - thread: Optional[_ThreadAggregation] = None - - def __bool__(self) -> bool: - return bool(self.annotations or self.references or self.replace or self.thread) - - class RelationsWorkerStore(SQLBaseStore): def __init__( self, @@ -384,7 +353,7 @@ class RelationsWorkerStore(SQLBaseStore): raise NotImplementedError() @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") - async def _get_applicable_edits( + async def get_applicable_edits( self, event_ids: Collection[str] ) -> Dict[str, Optional[EventBase]]: """Get the most recent edit (if any) that has happened for the given @@ -473,7 +442,7 @@ class RelationsWorkerStore(SQLBaseStore): raise NotImplementedError() @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") - async def _get_thread_summaries( + async def get_thread_summaries( self, event_ids: Collection[str] ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]: """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event. @@ -587,7 +556,7 @@ class RelationsWorkerStore(SQLBaseStore): latest_events = await self.get_events(latest_event_ids.values()) # type: ignore[attr-defined] # Check to see if any of those events are edited. - latest_edits = await self._get_applicable_edits(latest_event_ids.values()) + latest_edits = await self.get_applicable_edits(latest_event_ids.values()) # Map to the event IDs to the thread summary. # @@ -610,7 +579,7 @@ class RelationsWorkerStore(SQLBaseStore): raise NotImplementedError() @cachedList(cached_method_name="get_thread_participated", list_name="event_ids") - async def _get_threads_participated( + async def get_threads_participated( self, event_ids: Collection[str], user_id: str ) -> Dict[str, bool]: """Get whether the requesting user participated in the given threads. @@ -766,116 +735,6 @@ class RelationsWorkerStore(SQLBaseStore): "get_if_user_has_annotated_event", _get_if_user_has_annotated_event ) - async def _get_bundled_aggregation_for_event( - self, event: EventBase, user_id: str - ) -> Optional[BundledAggregations]: - """Generate bundled aggregations for an event. - - Note that this does not use a cache, but depends on cached methods. - - Args: - event: The event to calculate bundled aggregations for. - user_id: The user requesting the bundled aggregations. - - Returns: - The bundled aggregations for an event, if bundled aggregations are - enabled and the event can have bundled aggregations. - """ - - # Do not bundle aggregations for an event which represents an edit or an - # annotation. It does not make sense for them to have related events. - relates_to = event.content.get("m.relates_to") - if isinstance(relates_to, (dict, frozendict)): - relation_type = relates_to.get("rel_type") - if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): - return None - - event_id = event.event_id - room_id = event.room_id - - # The bundled aggregations to include, a mapping of relation type to a - # type-specific value. Some types include the direct return type here - # while others need more processing during serialization. 
- aggregations = BundledAggregations() - - annotations = await self.get_aggregation_groups_for_event(event_id, room_id) - if annotations.chunk: - aggregations.annotations = await annotations.to_dict( - cast("DataStore", self) - ) - - references = await self.get_relations_for_event( - event_id, event, room_id, RelationTypes.REFERENCE, direction="f" - ) - if references.chunk: - aggregations.references = await references.to_dict(cast("DataStore", self)) - - # Store the bundled aggregations in the event metadata for later use. - return aggregations - - async def get_bundled_aggregations( - self, events: Iterable[EventBase], user_id: str - ) -> Dict[str, BundledAggregations]: - """Generate bundled aggregations for events. - - Args: - events: The iterable of events to calculate bundled aggregations for. - user_id: The user requesting the bundled aggregations. - - Returns: - A map of event ID to the bundled aggregation for the event. Not all - events may have bundled aggregations in the results. - """ - # De-duplicate events by ID to handle the same event requested multiple times. - # - # State events do not get bundled aggregations. - events_by_id = { - event.event_id: event for event in events if not event.is_state() - } - - # event ID -> bundled aggregation in non-serialized form. - results: Dict[str, BundledAggregations] = {} - - # Fetch other relations per event. - for event in events_by_id.values(): - event_result = await self._get_bundled_aggregation_for_event(event, user_id) - if event_result: - results[event.event_id] = event_result - - # Fetch any edits (but not for redacted events). - edits = await self._get_applicable_edits( - [ - event_id - for event_id, event in events_by_id.items() - if not event.internal_metadata.is_redacted() - ] - ) - for event_id, edit in edits.items(): - results.setdefault(event_id, BundledAggregations()).replace = edit - - # Fetch thread summaries. - summaries = await self._get_thread_summaries(events_by_id.keys()) - # Only fetch participated for a limited selection based on what had - # summaries. - participated = await self._get_threads_participated( - [event_id for event_id, summary in summaries.items() if summary], user_id - ) - for event_id, summary in summaries.items(): - if summary: - thread_count, latest_thread_event, edit = summary - results.setdefault( - event_id, BundledAggregations() - ).thread = _ThreadAggregation( - latest_event=latest_thread_event, - latest_edit=edit, - count=thread_count, - # If there's a thread summary it must also exist in the - # participated dictionary. - current_user_participated=participated[event_id], - ) - - return results - class RelationsStore(RelationsWorkerStore): pass -- cgit 1.5.1 From bf9d549e3ad944e1e53a2ecc898640d690bf1eac Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 18 Mar 2022 19:03:46 +0000 Subject: Try to detect borked package installations. (#12244) * Try to detect borked package installations. Fixes #12223. Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/12244.misc | 1 + synapse/util/check_dependencies.py | 24 +++++++++++++++++++++++- tests/util/test_check_dependencies.py | 15 ++++++++++++++- 3 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12244.misc diff --git a/changelog.d/12244.misc b/changelog.d/12244.misc new file mode 100644 index 0000000000..950d48e4c6 --- /dev/null +++ b/changelog.d/12244.misc @@ -0,0 +1 @@ +Improve error message when dependencies check finds a broken installation. 
\ No newline at end of file
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index 12cd804939..66f1da7502 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -128,6 +128,19 @@ def _incorrect_version(
     )
 
 
+def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) -> str:
+    if extra:
+        return (
+            f"Synapse {VERSION} needs {requirement} for {extra}, "
+            f"but can't determine {requirement.name}'s version"
+        )
+    else:
+        return (
+            f"Synapse {VERSION} needs {requirement}, "
+            f"but can't determine {requirement.name}'s version"
+        )
+
+
 def check_requirements(extra: Optional[str] = None) -> None:
     """Check Synapse's dependencies are present and correctly versioned.
 
@@ -163,8 +176,17 @@ def check_requirements(extra: Optional[str] = None) -> None:
             deps_unfulfilled.append(requirement.name)
             errors.append(_not_installed(requirement, extra))
         else:
+            if dist.version is None:
+                # This shouldn't happen---it suggests a borked virtualenv. (See #12223)
+                # Try to give a vaguely helpful error message anyway.
+                # Type-ignore: the annotations don't reflect reality: see
+                #     https://github.com/python/typeshed/issues/7513
+                #     https://bugs.python.org/issue47060
+                deps_unfulfilled.append(requirement.name)  # type: ignore[unreachable]
+                errors.append(_no_reported_version(requirement, extra))
+
             # We specify prereleases=True to allow prereleases such as RCs.
-            if not requirement.specifier.contains(dist.version, prereleases=True):
+            elif not requirement.specifier.contains(dist.version, prereleases=True):
                 deps_unfulfilled.append(requirement.name)
                 errors.append(_incorrect_version(requirement, dist.version, extra))
 
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 38e9f58ac6..5d1aa025d1 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -12,7 +12,7 @@ from tests.unittest import TestCase
 
 
 class DummyDistribution(metadata.Distribution):
-    def __init__(self, version: str):
+    def __init__(self, version: object):
         self._version = version
 
     @property
@@ -30,6 +30,7 @@ old = DummyDistribution("0.1.2")
 old_release_candidate = DummyDistribution("0.1.2rc3")
 new = DummyDistribution("1.2.3")
 new_release_candidate = DummyDistribution("1.2.3rc4")
+distribution_with_no_version = DummyDistribution(None)
 
 
 # could probably use stdlib TestCase --- no need for twisted here
@@ -67,6 +68,18 @@ class TestDependencyChecker(TestCase):
             # should not raise
             check_requirements()
 
+    def test_version_reported_as_none(self) -> None:
+        """Complain if importlib.metadata.version() returns None.
+
+        This shouldn't normally happen, but it was seen in the wild (#12223).
+        """
+        with patch(
+            "synapse.util.check_dependencies.metadata.requires",
+            return_value=["dummypkg >= 1"],
+        ):
+            with self.mock_installed_package(distribution_with_no_version):
+                self.assertRaises(DependencyException, check_requirements)
+
     def test_checks_ignore_dev_dependencies(self) -> None:
         """Both generic and per-extra checks should ignore dev dependencies."""
         with patch(
-- cgit 1.5.1


From afa17f0eabf06087d53697eafc748f7c935fb13f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 21 Mar 2022 11:23:32 +0000
Subject: Return a 404 from `/state` for an outlier (#12087)

* Replace `get_state_for_pdu` with `get_state_ids_for_pdu` and `get_events_as_list`.
* Return a 404 from `/state` and `/state_ids` for an outlier --- changelog.d/12087.bugfix | 1 + synapse/federation/federation_server.py | 7 ++-- synapse/handlers/federation.py | 61 ++++++++++++--------------------- 3 files changed, 25 insertions(+), 44 deletions(-) create mode 100644 changelog.d/12087.bugfix diff --git a/changelog.d/12087.bugfix b/changelog.d/12087.bugfix new file mode 100644 index 0000000000..6dacdddd0d --- /dev/null +++ b/changelog.d/12087.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `.../state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 482bbdd867..af2d0f7d79 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -22,7 +22,6 @@ from typing import ( Callable, Collection, Dict, - Iterable, List, Optional, Tuple, @@ -577,10 +576,10 @@ class FederationServer(FederationBase): async def _on_context_state_request_compute( self, room_id: str, event_id: Optional[str] ) -> Dict[str, list]: + pdus: Collection[EventBase] if event_id: - pdus: Iterable[EventBase] = await self.handler.get_state_for_pdu( - room_id, event_id - ) + event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) + pdus = await self.store.get_events_as_list(event_ids) else: pdus = (await self.state.get_current_state(room_id)).values() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index db39aeabde..350ec9c03a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -950,54 +950,35 @@ class FederationHandler: return event - async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]: - """Returns the state at the event. i.e. not including said event.""" - - event = await self.store.get_event(event_id, check_room_id=room_id) - - state_groups = await self.state_store.get_state_groups(room_id, [event_id]) - - if state_groups: - _, state = list(state_groups.items()).pop() - results = {(e.type, e.state_key): e for e in state} - - if event.is_state(): - # Get previous state - if "replaces_state" in event.unsigned: - prev_id = event.unsigned["replaces_state"] - if prev_id != event.event_id: - prev_event = await self.store.get_event(prev_id) - results[(event.type, event.state_key)] = prev_event - else: - del results[(event.type, event.state_key)] - - res = list(results.values()) - return res - else: - return [] - async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: """Returns the state at the event. i.e. 
not including said event.""" event = await self.store.get_event(event_id, check_room_id=room_id) + if event.internal_metadata.outlier: + raise NotFoundError("State not known at event %s" % (event_id,)) state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id]) - if state_groups: - _, state = list(state_groups.items()).pop() - results = state + # get_state_groups_ids should return exactly one result + assert len(state_groups) == 1 - if event.is_state(): - # Get previous state - if "replaces_state" in event.unsigned: - prev_id = event.unsigned["replaces_state"] - if prev_id != event.event_id: - results[(event.type, event.state_key)] = prev_id - else: - results.pop((event.type, event.state_key), None) + state_map = next(iter(state_groups.values())) - return list(results.values()) - else: - return [] + state_key = event.get_state_key() + if state_key is not None: + # the event was not rejected (get_event raises a NotFoundError for rejected + # events) so the state at the event should include the event itself. + assert ( + state_map.get((event.type, state_key)) == event.event_id + ), "State at event did not include event itself" + + # ... but we need the state *before* that event + if "replaces_state" in event.unsigned: + prev_id = event.unsigned["replaces_state"] + state_map[(event.type, state_key)] = prev_id + else: + del state_map[(event.type, state_key)] + + return list(state_map.values()) async def on_backfill_request( self, origin: str, room_id: str, pdu_list: List[str], limit: int -- cgit 1.5.1 From 1530cef19244e21d8b160bee2d925dcabbc0c4be Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Mon, 21 Mar 2022 11:52:10 +0000 Subject: Make it possible to enable compression for the metrics HTTP resource (#12258) * Make it possible to enable compression for the metrics HTTP resource This can provide significant bandwidth savings pulling metrics from synapse instances. * Add changelog file. * Fix type hint --- changelog.d/12258.misc | 1 + synapse/app/homeserver.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12258.misc diff --git a/changelog.d/12258.misc b/changelog.d/12258.misc new file mode 100644 index 0000000000..80024c8e91 --- /dev/null +++ b/changelog.d/12258.misc @@ -0,0 +1 @@ +Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e4dc04c0b4..ad2b7c9515 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -261,7 +261,10 @@ class SynapseHomeServer(HomeServer): resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "metrics" and self.config.metrics.enable_metrics: - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) + metrics_resource: Resource = MetricsResource(RegistryProxy) + if compress: + metrics_resource = gz_wrap(metrics_resource) + resources[METRICS_PREFIX] = metrics_resource if name == "replication": resources[REPLICATION_PREFIX] = ReplicationRestResource(self) -- cgit 1.5.1 From 6134b3079e954e15c5a92ad3b89050085197b851 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 21 Mar 2022 12:16:46 +0000 Subject: Reword 'Choose your user name' as 'Choose your account name' in the SSO registration template, in order to comply with SIWA guidelines. 
(#12260) * Reword as 'Choose your account name' * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/12260.misc | 1 + synapse/res/templates/sso_auth_account_details.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12260.misc diff --git a/changelog.d/12260.misc b/changelog.d/12260.misc new file mode 100644 index 0000000000..deacf034de --- /dev/null +++ b/changelog.d/12260.misc @@ -0,0 +1 @@ +Reword 'Choose your user name' as 'Choose your account name' in the SSO registration template, in order to comply with SIWA guidelines. \ No newline at end of file diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 41315e4fd4..b231aace01 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -130,7 +130,7 @@
-            <h1>Choose your user name</h1>
+            <h1>Choose your account name</h1>
             <p>This is required to create your account on {{ server_name }}, and you can't change this later.</p>
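An aside on the metrics-compression change earlier in this series: `gz_wrap`
wraps a Twisted web resource so that responses are gzip-compressed for clients
that advertise support. A minimal sketch of that pattern using stock Twisted
APIs (Synapse's actual helper may differ in detail):

    from twisted.web.resource import EncodingResourceWrapper
    from twisted.web.server import GzipEncoderFactory

    def gz_wrap_sketch(resource):
        # Negotiates gzip via the request's Accept-Encoding header and
        # compresses the response body when the client supports it.
        return EncodingResourceWrapper(resource, [GzipEncoderFactory()])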
-- cgit 1.5.1 From 9d21ecf7ceab55bc19c4457b8b07401b0b1623a7 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 21 Mar 2022 14:43:16 +0100 Subject: Add type hints to tests files. (#12256) --- changelog.d/12256.misc | 1 + mypy.ini | 2 - tests/handlers/test_typing.py | 35 ++++++++------- tests/push/test_push_rule_evaluator.py | 23 +++++----- tests/storage/test_background_update.py | 48 +++++++++++--------- tests/storage/test_id_generators.py | 80 +++++++++++++++++---------------- 6 files changed, 101 insertions(+), 88 deletions(-) create mode 100644 changelog.d/12256.misc diff --git a/changelog.d/12256.misc b/changelog.d/12256.misc new file mode 100644 index 0000000000..c5b6356799 --- /dev/null +++ b/changelog.d/12256.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index d8b3b3f9e5..24d4ba15d4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -82,9 +82,7 @@ exclude = (?x) |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py |tests/state/test_v2.py - |tests/storage/test_background_update.py |tests/storage/test_base.py - |tests/storage/test_id_generators.py |tests/storage/test_roommember.py |tests/test_metrics.py |tests/test_phone_home.py diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index f91a80b9fa..ffd5c4cb93 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -18,11 +18,14 @@ from typing import Dict from unittest.mock import ANY, Mock, call from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer -from synapse.types import UserID, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, UserID, create_requester +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -42,7 +45,9 @@ ROOM_ID = "a-room" OTHER_ROOM_ID = "another-room" -def _expect_edu_transaction(edu_type, content, origin="test"): +def _expect_edu_transaction( + edu_type: str, content: JsonDict, origin: str = "test" +) -> JsonDict: return { "origin": origin, "origin_server_ts": 1000000, @@ -51,12 +56,12 @@ def _expect_edu_transaction(edu_type, content, origin="test"): } -def _make_edu_transaction_json(edu_type, content): +def _make_edu_transaction_json(edu_type: str, content: JsonDict) -> bytes: return json.dumps(_expect_edu_transaction(edu_type, content)).encode("utf8") class TypingNotificationsTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # we mock out the keyring so as to skip the authentication check on the # federation API call. 
mock_keyring = Mock(spec=["verify_json_for_server"]) @@ -83,7 +88,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): d["/_matrix/federation"] = TransportLayerServer(self.hs) return d - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: mock_notifier = hs.get_notifier() self.on_new_event = mock_notifier.on_new_event @@ -111,24 +116,24 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.room_members = [] - async def check_user_in_room(room_id, user_id): + async def check_user_in_room(room_id: str, user_id: str) -> None: if user_id not in [u.to_string() for u in self.room_members]: raise AuthError(401, "User is not in the room") return None hs.get_auth().check_user_in_room = check_user_in_room - async def check_host_in_room(room_id, server_name): + async def check_host_in_room(room_id: str, server_name: str) -> bool: return room_id == ROOM_ID hs.get_event_auth_handler().check_host_in_room = check_host_in_room - def get_joined_hosts_for_room(room_id): + def get_joined_hosts_for_room(room_id: str): return {member.domain for member in self.room_members} self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room - async def get_users_in_room(room_id): + async def get_users_in_room(room_id: str): return {str(u) for u in self.room_members} self.datastore.get_users_in_room = get_users_in_room @@ -153,7 +158,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): lambda *args, **kwargs: make_awaitable(None) ) - def test_started_typing_local(self): + def test_started_typing_local(self) -> None: self.room_members = [U_APPLE, U_BANANA] self.assertEqual(self.event_source.get_current_key(), 0) @@ -187,7 +192,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) @override_config({"send_federation": True}) - def test_started_typing_remote_send(self): + def test_started_typing_remote_send(self) -> None: self.room_members = [U_APPLE, U_ONION] self.get_success( @@ -217,7 +222,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): try_trailing_slash_on_400=True, ) - def test_started_typing_remote_recv(self): + def test_started_typing_remote_recv(self) -> None: self.room_members = [U_APPLE, U_ONION] self.assertEqual(self.event_source.get_current_key(), 0) @@ -256,7 +261,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ], ) - def test_started_typing_remote_recv_not_in_room(self): + def test_started_typing_remote_recv_not_in_room(self) -> None: self.room_members = [U_APPLE, U_ONION] self.assertEqual(self.event_source.get_current_key(), 0) @@ -292,7 +297,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.assertEqual(events[1], 0) @override_config({"send_federation": True}) - def test_stopped_typing(self): + def test_stopped_typing(self) -> None: self.room_members = [U_APPLE, U_BANANA, U_ONION] # Gut-wrenching @@ -343,7 +348,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) - def test_typing_timeout(self): + def test_typing_timeout(self) -> None: self.room_members = [U_APPLE, U_BANANA] self.assertEqual(self.event_source.get_current_key(), 0) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 3849beb9d6..5dba187076 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -from typing import Any, Dict +from typing import Dict, Optional, Union import frozendict @@ -20,12 +20,13 @@ from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent +from synapse.types import JsonDict from tests import unittest class PushRuleEvaluatorTestCase(unittest.TestCase): - def _get_evaluator(self, content): + def _get_evaluator(self, content: JsonDict) -> PushRuleEvaluatorForEvent: event = FrozenEvent( { "event_id": "$event_id", @@ -39,12 +40,12 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): ) room_member_count = 0 sender_power_level = 0 - power_levels = {} + power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluatorForEvent( event, room_member_count, sender_power_level, power_levels ) - def test_display_name(self): + def test_display_name(self) -> None: """Check for a matching display name in the body of the event.""" evaluator = self._get_evaluator({"body": "foo bar baz"}) @@ -71,20 +72,20 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) def _assert_matches( - self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None ) -> None: evaluator = self._get_evaluator(content) self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) def _assert_not_matches( - self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None ) -> None: evaluator = self._get_evaluator(content) self.assertFalse( evaluator.matches(condition, "@user:test", "display_name"), msg ) - def test_event_match_body(self): + def test_event_match_body(self) -> None: """Check that event_match conditions on content.body work as expected""" # if the key is `content.body`, the pattern matches substrings. @@ -165,7 +166,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): r"? after \ should match any character", ) - def test_event_match_non_body(self): + def test_event_match_non_body(self) -> None: """Check that event_match conditions on other keys work as expected""" # if the key is anything other than 'content.body', the pattern must match the @@ -241,7 +242,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): "pattern should not match before a newline", ) - def test_no_body(self): + def test_no_body(self) -> None: """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) @@ -250,7 +251,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): } self.assertFalse(evaluator.matches(condition, "@user:test", "foo")) - def test_invalid_body(self): + def test_invalid_body(self) -> None: """A non-string body should not break the evaluator.""" condition = { "kind": "contains_display_name", @@ -260,7 +261,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): evaluator = self._get_evaluator({"body": body}) self.assertFalse(evaluator.matches(condition, "@user:test", "foo")) - def test_tweaks_for_actions(self): + def test_tweaks_for_actions(self) -> None: """ This tests the behaviour of tweaks_for_actions. 
""" diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 5cf18b690e..fd619b64d4 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -17,8 +17,12 @@ from unittest.mock import Mock import yaml from twisted.internet.defer import Deferred, ensureDeferred +from twisted.test.proto_helpers import MemoryReactor +from synapse.server import HomeServer from synapse.storage.background_updates import BackgroundUpdater +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable, simple_async_mock @@ -26,7 +30,7 @@ from tests.unittest import override_config class BackgroundUpdateTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( @@ -39,7 +43,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): ) self.store = self.hs.get_datastores().main - async def update(self, progress, count): + async def update(self, progress: JsonDict, count: int) -> int: duration_ms = 10 await self.clock.sleep((count * duration_ms) / 1000) progress = {"my_key": progress["my_key"] + 1} @@ -51,7 +55,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): ) return count - def test_do_background_update(self): + def test_do_background_update(self) -> None: # the time we claim it takes to update one item when running the update duration_ms = 10 @@ -80,7 +84,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # second step: complete the update # we should now get run with a much bigger number of items to update - async def update(progress, count): + async def update(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertAlmostEqual( count, @@ -110,7 +114,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): """ ) ) - def test_background_update_default_batch_set_by_config(self): + def test_background_update_default_batch_set_by_config(self) -> None: """ Test that the background update is run with the default_batch_size set by the config """ @@ -133,7 +137,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # on the first call, we should get run with the default background update size specified in the config self.update_handler.assert_called_once_with({"my_key": 1}, 20) - def test_background_update_default_sleep_behavior(self): + def test_background_update_default_sleep_behavior(self) -> None: """ Test default background update behavior, which is to sleep """ @@ -147,7 +151,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance the reactor less than the default sleep duration (1000ms) self.reactor.pump([0.5]) @@ -167,7 +171,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): """ ) ) - def test_background_update_sleep_set_in_config(self): + def test_background_update_sleep_set_in_config(self) -> None: """ Test that changing the sleep time in the config changes how long it sleeps """ @@ -181,7 +185,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): 
self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance the reactor less than the configured sleep duration (500ms) self.reactor.pump([0.45]) @@ -201,7 +205,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): """ ) ) - def test_disabling_background_update_sleep(self): + def test_disabling_background_update_sleep(self) -> None: """ Test that disabling sleep in the config results in bg update not sleeping """ @@ -215,7 +219,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance the reactor very little self.reactor.pump([0.025]) @@ -230,7 +234,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): """ ) ) - def test_background_update_duration_set_in_config(self): + def test_background_update_duration_set_in_config(self) -> None: """ Test that the desired duration set in the config is used in determining batch size """ @@ -254,7 +258,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # the first update was run with the default batch size, this should be run with 500ms as the # desired duration - async def update(progress, count): + async def update(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertAlmostEqual( count, @@ -275,7 +279,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): """ ) ) - def test_background_update_min_batch_set_in_config(self): + def test_background_update_min_batch_set_in_config(self) -> None: """ Test that the minimum batch size set in the config is used """ @@ -290,7 +294,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): ) # Run the update with the long-running update item - async def update(progress, count): + async def update_long(progress: JsonDict, count: int) -> int: await self.clock.sleep((count * duration_ms) / 1000) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( @@ -301,7 +305,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): ) return count - self.update_handler.side_effect = update + self.update_handler.side_effect = update_long self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), @@ -311,25 +315,25 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # the first update was run with the default batch size, this should be run with minimum batch size # as the first items took a very long time - async def update(progress, count): + async def update_short(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertEqual(count, 5) await self.updates._end_background_update("test_update") return count - self.update_handler.side_effect = update + self.update_handler.side_effect = update_short self.get_success(self.updates.do_next_background_update(False)) class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( self.get_success(self.updates.has_completed_background_updates()) ) - 
self.update_deferred = Deferred() + self.update_deferred: Deferred[int] = Deferred() self.update_handler = Mock(return_value=self.update_deferred) self.updates.register_background_update_handler( "test_update", self.update_handler @@ -358,7 +362,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): ), ) - def test_controller(self): + def test_controller(self) -> None: store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( @@ -368,7 +372,7 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): ) # Set the return value for the context manager. - enter_defer = Deferred() + enter_defer: Deferred[int] = Deferred() self._update_ctx_manager.__aenter__ = Mock(return_value=enter_defer) # Start the background update. diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 6ac4b93f98..395396340b 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -13,9 +13,13 @@ # limitations under the License. from typing import List, Optional -from synapse.storage.database import DatabasePool +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.engines import IncorrectDatabaseSetup from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.util import Clock from tests.unittest import HomeserverTestCase from tests.utils import USE_POSTGRES_FOR_TESTS @@ -25,13 +29,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) - def _setup_db(self, txn): + def _setup_db(self, txn: LoggingTransaction) -> None: txn.execute("CREATE SEQUENCE foobar_seq") txn.execute( """ @@ -59,12 +63,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) - def _insert_rows(self, instance_name: str, number: int): + def _insert_rows(self, instance_name: str, number: int) -> None: """Insert N rows as the given instance, inserting with stream IDs pulled from the postgres sequence. """ - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: for _ in range(number): txn.execute( "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)", @@ -80,12 +84,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(self.db_pool.runInteraction("_insert_rows", _insert)) - def _insert_row_with_id(self, instance_name: str, stream_id: int): + def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None: """Insert one row as the given instance with given stream_id, updating the postgres sequence position to match. """ - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: txn.execute( "INSERT INTO foobar VALUES (?, ?)", ( @@ -104,7 +108,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert)) - def test_empty(self): + def test_empty(self) -> None: """Test an ID generator against an empty database gives sensible current positions. 
""" @@ -114,7 +118,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # The table is empty so we expect an empty map for positions self.assertEqual(id_gen.get_positions(), {}) - def test_single_instance(self): + def test_single_instance(self) -> None: """Test that reads and writes from a single process are handled correctly. """ @@ -130,7 +134,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) @@ -142,7 +146,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) - def test_out_of_order_finish(self): + def test_out_of_order_finish(self) -> None: """Test that IDs persisted out of order are correctly handled""" # Prefill table with 7 rows written by 'master' @@ -191,7 +195,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"master": 11}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) - def test_multi_instance(self): + def test_multi_instance(self) -> None: """Test that reads and writes from multiple processes are handled correctly. """ @@ -215,7 +219,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. - async def _get_next_async(): + async def _get_next_async() -> None: async with first_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) @@ -233,7 +237,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # ... but calling `get_next` on the second instance should give a unique # stream ID - async def _get_next_async(): + async def _get_next_async2() -> None: async with second_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 9) @@ -241,7 +245,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): second_id_gen.get_positions(), {"first": 3, "second": 7} ) - self.get_success(_get_next_async()) + self.get_success(_get_next_async2()) self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 9}) @@ -249,7 +253,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): second_id_gen.advance("first", 8) self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9}) - def test_get_next_txn(self): + def test_get_next_txn(self) -> None: """Test that the `get_next_txn` function works correctly.""" # Prefill table with 7 rows written by 'master' @@ -263,7 +267,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. - def _get_next_txn(txn): + def _get_next_txn(txn: LoggingTransaction) -> None: stream_id = id_gen.get_next_txn(txn) self.assertEqual(stream_id, 8) @@ -275,7 +279,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) - def test_get_persisted_upto_position(self): + def test_get_persisted_upto_position(self) -> None: """Test that `get_persisted_upto_position` correctly tracks updates to positions. 
""" @@ -317,7 +321,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): id_gen.advance("second", 15) self.assertEqual(id_gen.get_persisted_upto_position(), 11) - def test_get_persisted_upto_position_get_next(self): + def test_get_persisted_upto_position_get_next(self) -> None: """Test that `get_persisted_upto_position` correctly tracks updates to positions when `get_next` is called. """ @@ -331,7 +335,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_persisted_upto_position(), 5) - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 6) self.assertEqual(id_gen.get_persisted_upto_position(), 5) @@ -344,7 +348,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # `persisted_upto_position` in this case, then it will be correct in the # other cases that are tested above (since they'll hit the same code). - def test_restart_during_out_of_order_persistence(self): + def test_restart_during_out_of_order_persistence(self) -> None: """Test that restarting a process while another process is writing out of order updates are handled correctly. """ @@ -388,7 +392,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): id_gen_worker.advance("master", 9) self.assertEqual(id_gen_worker.get_positions(), {"master": 9}) - def test_writer_config_change(self): + def test_writer_config_change(self) -> None: """Test that changing the writer config correctly works.""" self._insert_row_with_id("first", 3) @@ -421,7 +425,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # Check that we get a sane next stream ID with this new config. - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen_3.get_next() as stream_id: self.assertEqual(stream_id, 6) @@ -435,7 +439,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6) self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6) - def test_sequence_consistency(self): + def test_sequence_consistency(self) -> None: """Test that we error out if the table and sequence diverges.""" # Prefill with some rows @@ -458,13 +462,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) - def _setup_db(self, txn): + def _setup_db(self, txn: LoggingTransaction) -> None: txn.execute("CREATE SEQUENCE foobar_seq") txn.execute( """ @@ -493,10 +497,10 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): return self.get_success(self.db_pool.runWithConnection(_create)) - def _insert_row(self, instance_name: str, stream_id: int): + def _insert_row(self, instance_name: str, stream_id: int) -> None: """Insert one row as the given instance with given stream_id.""" - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: txn.execute( "INSERT INTO foobar VALUES (?, ?)", ( @@ -514,13 +518,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(self.db_pool.runInteraction("_insert_row", _insert)) - def test_single_instance(self): + def test_single_instance(self) -> None: """Test that reads and writes from a 
         single process are handled correctly.
         """

         id_gen = self._create_id_generator()

-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen.get_next() as stream_id:
                 self._insert_row("master", stream_id)

@@ -530,7 +534,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen.get_current_token_for_writer("master"), -1)
         self.assertEqual(id_gen.get_persisted_upto_position(), -1)

-        async def _get_next_async2():
+        async def _get_next_async2() -> None:
             async with id_gen.get_next_mult(3) as stream_ids:
                 for stream_id in stream_ids:
                     self._insert_row("master", stream_id)
@@ -548,14 +552,14 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(second_id_gen.get_current_token_for_writer("master"), -4)
         self.assertEqual(second_id_gen.get_persisted_upto_position(), -4)

-    def test_multiple_instance(self):
+    def test_multiple_instance(self) -> None:
         """Tests that having multiple instances that get advanced over
        federation works correctly.
         """
         id_gen_1 = self._create_id_generator("first", writers=["first", "second"])
         id_gen_2 = self._create_id_generator("second", writers=["first", "second"])

-        async def _get_next_async():
+        async def _get_next_async() -> None:
             async with id_gen_1.get_next() as stream_id:
                 self._insert_row("first", stream_id)
                 id_gen_2.advance("first", stream_id)
@@ -567,7 +571,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self.assertEqual(id_gen_1.get_persisted_upto_position(), -1)
         self.assertEqual(id_gen_2.get_persisted_upto_position(), -1)

-        async def _get_next_async2():
+        async def _get_next_async2() -> None:
             async with id_gen_2.get_next() as stream_id:
                 self._insert_row("second", stream_id)
                 id_gen_1.advance("second", stream_id)
@@ -584,13 +588,13 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
     if not USE_POSTGRES_FOR_TESTS:
         skip = "Requires Postgres"

-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
         self.db_pool: DatabasePool = self.store.db_pool

         self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))

-    def _setup_db(self, txn):
+    def _setup_db(self, txn: LoggingTransaction) -> None:
         txn.execute("CREATE SEQUENCE foobar_seq")
         txn.execute(
             """
@@ -642,7 +646,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         from the postgres sequence.
         """

-        def _insert(txn):
+        def _insert(txn: LoggingTransaction) -> None:
             for _ in range(number):
                 txn.execute(
                     "INSERT INTO %s VALUES (nextval('foobar_seq'), ?)" % (table,),
@@ -659,7 +663,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):

         self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))

-    def test_load_existing_stream(self):
+    def test_load_existing_stream(self) -> None:
         """Test creating ID gens with multiple tables that have rows from after
        the position in the `stream_positions` table.
         """
-- cgit 1.5.1


From d9bc65918e406b8ae15c42b2ea3680d2c9fb79c3 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Mon, 21 Mar 2022 17:27:59 +0000
Subject: Call out synctl change

---
 CHANGES.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 78498c10b5..06396c3f6f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,6 +3,8 @@ Synapse 1.55.0rc1 (2022-03-15)

 This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility.
 **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.

+This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details.
+
 Features
 --------
@@ -38,6 +40,7 @@ Deprecations and Removals
 -------------------------

 - **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))**
 - Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
 - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
@@ -56,7 +59,6 @@ Internal Changes

 - Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
 - Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
 - Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
-- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140))
 - Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
 - Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
 - Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
-- cgit 1.5.1


From 01211e0c16758f41883e42f1d3e6306b7a683e96 Mon Sep 17 00:00:00 2001
From: Michael Telatynski <7t3chguy@gmail.com>
Date: Tue, 22 Mar 2022 10:22:25 +0000
Subject: Tweak copy for sso account details template (#12265)

* Tweak copy for sso account details template

* Update sso footer copyright year

* Add newsfragment

Signed-off-by: Michael Telatynski <7t3chguy@gmail.com>
---
 changelog.d/12265.misc                              | 1 +
 synapse/res/templates/sso_auth_account_details.html | 8 ++++----
 synapse/res/templates/sso_auth_account_details.js   | 2 +-
 synapse/res/templates/sso_footer.html               | 2 +-
 4 files changed, 7 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/12265.misc

diff --git a/changelog.d/12265.misc b/changelog.d/12265.misc
new file mode 100644
index 0000000000..4213f58555
--- /dev/null
+++ b/changelog.d/12265.misc
@@ -0,0 +1 @@
+Tweak copy for default sso account details template to better adhere to mobile app store guidelines.
\ No newline at end of file
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index b231aace01..1ba850369a 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -130,13 +130,13 @@
-        <h1>Choose your account name</h1>
-        <p>This is required to create your account on {{ server_name }}, and you can't change this later.</p>
+        <h1>Create your account</h1>
+        <p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
             <div class="prefix">@</div>
             <div class="postfix">:{{ server_name }}</div>
@@ -145,7 +145,7 @@
         {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
-          <h2>{% if idp.idp_icon %}{% endif %}Information from {{ idp.idp_name }}</h2>
+          <h2>{% if idp.idp_icon %}{% endif %}Optional data from {{ idp.idp_name }}</h2>
 {% if user_attributes.avatar_url %}
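
As context for the copy change above: these templates are rendered server-side with Jinja2, with `server_name`, `user_attributes` and `idp` supplied in the template context by the SSO flow. Below is a minimal sketch of rendering equivalent copy in isolation; the inline template string and the context values are illustrative assumptions, not the real template file or real data.

from jinja2 import Template

# Simplified stand-in for the heading/description copy changed above; the
# real markup lives in synapse/res/templates/sso_auth_account_details.html.
template = Template(
    "<h1>Create your account</h1>\n"
    "<p>This is required. Continue to create your account on "
    "{{ server_name }}. You can't change this later.</p>\n"
    "<h2>Optional data from {{ idp.idp_name }}</h2>"
)

# Hypothetical context values, chosen only for illustration.
print(template.render(server_name="example.com", idp={"idp_name": "ExampleSSO"}))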