author: Patrick Cloke <patrickc@matrix.org>, 2023-05-24 10:23:49 -0400
committer: Patrick Cloke <patrickc@matrix.org>, 2023-05-24 10:23:49 -0400
commit: 87fbff5a46079e6952f6f1f74d2910b05290e1f6
tree: 955e7547256dba85a0d7b8075626925dbc9b28c5
parent: Fix advertised flows when SSO is not in use
parent: Improve type hints for cached decorator. (#15658)
download: synapse-87fbff5a46079e6952f6f1f74d2910b05290e1f6.tar.xz
Merge remote-tracking branch 'origin/develop' into hughns/msc3882-r1
341 files changed, 8979 insertions, 3101 deletions
diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh index e536a9db8b..580f87bbdf 100755 --- a/.ci/scripts/prepare_old_deps.sh +++ b/.ci/scripts/prepare_old_deps.sh @@ -31,35 +31,6 @@ sed -i \ -e '/systemd/d' \ pyproject.toml -# Use poetry to do the installation. This ensures that the versions are all mutually -# compatible (as far the package metadata declares, anyway); pip's package resolver -# is more lax. -# -# Rather than `poetry install --no-dev`, we drop all dev dependencies and the dev-docs -# group from the toml file. This means we don't have to ensure compatibility between -# old deps and dev tools. - -pip install toml wheel - -REMOVE_DEV_DEPENDENCIES=" -import toml -with open('pyproject.toml', 'r') as f: - data = toml.loads(f.read()) - -del data['tool']['poetry']['dev-dependencies'] -del data['tool']['poetry']['group']['dev-docs'] - -with open('pyproject.toml', 'w') as f: - toml.dump(data, f) -" -python3 -c "$REMOVE_DEV_DEPENDENCIES" - -pip install poetry==1.3.2 -poetry lock - echo "::group::Patched pyproject.toml" cat pyproject.toml echo "::endgroup::" -echo "::group::Lockfile after patch" -cat poetry.lock -echo "::endgroup::" diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh index 3778478da6..47a3ff8e69 100755 --- a/.ci/scripts/setup_complement_prerequisites.sh +++ b/.ci/scripts/setup_complement_prerequisites.sh @@ -9,16 +9,6 @@ set -eu alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func' alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func' -block Set Go Version - # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. - # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path - - # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2 - echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH - # Add the Go path to the PATH: We need this so we can call gotestfmt - echo "~/go/bin" >> $GITHUB_PATH -endblock - block Install Complement Dependencies sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml index abe0f656a2..79578eeaaa 100644 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -129,7 +129,7 @@ body: attributes: label: Relevant log output description: | - Please copy and paste any relevant log output, ideally at INFO or DEBUG log level. + Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level. This will be automatically formatted into code, so there is no need for backticks (`\``). Please be careful to remove any personal or private data. 
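The `block`/`endblock` aliases in `setup_complement_prerequisites.sh` wrap GitHub Actions log-grouping workflow commands. As a minimal sketch of the underlying mechanism (the packages shown are the ones the script itself installs in the step that follows), the same effect can be produced inline in any shell step:

```sh
# Collapse a noisy step into a foldable group in the Actions log.
# "::group::<title>" and "::endgroup::" are GitHub Actions workflow
# commands; everything echoed between them is hidden behind a single
# expandable line in the job output.
echo "::group::Install Complement Dependencies"
sudo apt-get -qq update
sudo apt-get install -qqy libolm3 libolm-dev
echo "::endgroup::"
```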
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index a5e74eb297..928bcae8cf 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0 + uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} @@ -22,7 +22,7 @@ jobs: path: book - name: 📤 Deploy to Netlify - uses: matrix-org/netlify-pr-preview@v1 + uses: matrix-org/netlify-pr-preview@v2 with: path: book owner: ${{ github.event.workflow_run.head_repository.owner.login }} diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index d5a68ffa1f..452600ba16 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -27,9 +27,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 # The dev dependencies aren't exposed in the wheel metadata (at least with current @@ -61,9 +59,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - run: sudo apt-get -qq install xmlsec1 @@ -134,9 +130,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Ensure sytest runs `pip install` @@ -184,6 +178,8 @@ jobs: with: path: synapse + - uses: actions/setup-go@v4 + - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a2cec324a5..f84a4ef644 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -35,12 +35,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -70,9 +65,60 @@ jobs: - run: .ci/scripts/check_lockfile.py lint: - uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2" - with: - typechecking-extras: "all" + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup Poetry + uses: matrix-org/setup-python-poetry@v1 + with: + install-project: "false" + + - name: Import order (isort) + run: poetry run isort --check --diff . + + - name: Code style (black) + run: poetry run black --check --diff . + + - name: Semantic checks (ruff) + # --quiet suppresses the update check. + run: poetry run ruff --quiet . 
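The new `lint` job above spells out the individual checks instead of delegating to the shared `python-poetry-ci` reusable workflow. A rough local equivalent — assuming the repository's Poetry environment is already set up — is just the same three commands (the typechecking half moves into the separate `lint-mypy` job that follows):

```sh
# Approximate CI's lint job locally; tool versions come from poetry.lock.
poetry run isort --check --diff .  # import ordering
poetry run black --check --diff .  # code style
poetry run ruff --quiet .          # semantic checks (--quiet suppresses the update check)
```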
+ + lint-mypy: + runs-on: ubuntu-latest + name: Typechecking + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup Poetry + uses: matrix-org/setup-python-poetry@v1 + with: + # We want to make use of type hints in optional dependencies too. + extras: all + # We have seen odd mypy failures that were resolved when we started + # installing the project again: + # https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775 + # To make CI green, err towards caution and install the project. + install-project: "true" + + - name: Install Rust + uses: dtolnay/rust-toolchain@1.58.1 + - uses: Swatinem/rust-cache@v2 + + # Cribbed from + # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17 + - name: Restore/persist mypy's cache + uses: actions/cache@v3 + with: + path: | + .mypy_cache + key: mypy-cache-${{ github.context.sha }} + restore-keys: mypy-cache- + + - name: Run mypy + run: poetry run mypy lint-crlf: runs-on: ubuntu-latest @@ -104,12 +150,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -126,12 +167,8 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d + uses: dtolnay/rust-toolchain@1.58.1 with: - toolchain: 1.58.1 components: clippy - uses: Swatinem/rust-cache@v2 @@ -148,10 +185,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d + uses: dtolnay/rust-toolchain@master with: toolchain: nightly-2022-12-01 components: clippy @@ -168,10 +202,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d + uses: dtolnay/rust-toolchain@master with: # We use nightly so that it correctly groups together imports toolchain: nightly-2022-12-01 @@ -185,6 +216,7 @@ jobs: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped needs: - lint + - lint-mypy - lint-crlf - lint-newsfile - lint-pydantic @@ -236,12 +268,7 @@ jobs: postgres:${{ matrix.job.postgres-version }} - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -281,52 +308,39 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | + sudo apt update sudo apt-get -qq install build-essential libffi-dev python-dev \ - libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev + libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - uses: actions/setup-python@v4 with: python-version: '3.7' - # Calculating the old-deps actually takes a bunch of time, so we cache the - # pyproject.toml / poetry.lock. We need to cache pyproject.toml as - # otherwise the `poetry install` step will error due to the poetry.lock - # file being outdated. - # - # This caches the output of `Prepare old deps`, which should generate the - # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input. - - uses: actions/cache@v3 - id: cache-poetry-old-deps - name: Cache poetry.lock - with: - path: | - poetry.lock - pyproject.toml - key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }} - name: Prepare old deps if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' run: .ci/scripts/prepare_old_deps.sh - # We only now install poetry so that `setup-python-poetry` caches the - # right poetry.lock's dependencies. - - uses: matrix-org/setup-python-poetry@v1 - with: - python-version: '3.7' - poetry-version: "1.3.2" - extras: "all test" + # Note: we install using `pip` here, not poetry. `poetry install` ignores the + # build-system section (https://github.com/python-poetry/poetry/issues/6154), but + # we explicitly want to test that you can `pip install` using the oldest version + # of poetry-core and setuptools-rust. + - run: pip install .[all,test] + + # We nuke the local copy, as we've installed synapse into the virtualenv + # (rather than use an editable install, which we no longer support). If we + # don't do this then python can't find the native lib. + - run: rm -rf synapse/ + + # Sanity check we can import/run Synapse + - run: python -m synapse.app.homeserver --help - - run: poetry run trial -j6 tests + - run: python -m twisted.trial -j6 tests - name: Dump logs # Logs are most useful when the command fails, always include them. if: ${{ always() }} @@ -402,12 +416,7 @@ jobs: run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - name: Run SyTest @@ -547,14 +556,11 @@ jobs: path: synapse - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 + - uses: actions/setup-go@v4 + - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh @@ -578,12 +584,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: 1.58.1 + uses: dtolnay/rust-toolchain@1.58.1 - uses: Swatinem/rust-cache@v2 - run: cargo test @@ -601,10 +602,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - # There don't seem to be versioned releases of this action per se: for each rust - # version there is a branch which gets constantly rebased on top of master. - # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d + uses: dtolnay/rust-toolchain@master with: toolchain: nightly-2022-12-01 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 461c85067c..14fc6a0389 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -25,9 +25,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -50,9 +48,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -89,9 +85,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@fc3253060d0c959bea12a59f10f8391454a0b02d - with: - toolchain: stable + uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Patch dependencies @@ -147,6 +141,8 @@ jobs: with: path: synapse + - uses: actions/setup-go@v4 + - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh diff --git a/.gitignore b/.gitignore index 9d037f28e7..8cf504324b 100644 --- a/.gitignore +++ b/.gitignore @@ -15,9 +15,10 @@ _trial_temp*/ .DS_Store __pycache__/ -# We do want the poetry and cargo lockfile. +# We do want poetry, cargo and flake lockfiles. 
!poetry.lock !Cargo.lock +!flake.lock # stuff that is likely to exist when you run a server locally /*.db @@ -38,6 +39,9 @@ __pycache__/ /.envrc .direnv/ +# For nix/devenv users +.devenv/ + # IDEs /.idea/ /.ropeproject/ diff --git a/CHANGES.md b/CHANGES.md index 2a6ee1490d..e9397158f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,233 @@ -Synapse 1.81.0rc1 (2023-04-04) +Synapse 1.84.0 (2023-05-23) +=========================== + +The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) + + +Synapse 1.84.0rc1 (2023-05-16) ============================== +Features +-------- + +- Add an option to prevent media downloads from configured domains. ([\#15197](https://github.com/matrix-org/synapse/issues/15197)) +- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224)) +- Add redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312)) +- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516)) +- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528)) +- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536)) +- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559)) +- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569)) + + +Bugfixes +-------- + +- Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523)) +- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555)) +- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564)) +- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571)) + + +Deprecations and Removals +------------------------- + +- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. 
([\#15491](https://github.com/matrix-org/synapse/issues/15491)) + + +Updates to the Docker image +--------------------------- + +- Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567)) + + +Improved Documentation +---------------------- + +- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544)) +- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560)) +- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587)) + + +Internal Changes +---------------- + +- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025)) +- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470)) +- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509)) +- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522)) +- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527)) +- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529)) +- Speed up deleting of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531)) +- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545)) +- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534)) +- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535)) +- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539)) +- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542)) +- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543)) +- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548)) +- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549)) +- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550)) +- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551)) +- Bump hiredis from 2.2.2 to 2.2.3. 
([\#15552](https://github.com/matrix-org/synapse/issues/15552)) +- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553)) +- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558)) +- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562)) +- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563)) +- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565)) +- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570)) +- Deal with upcoming Github Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576)) +- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577)) +- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588)) +- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589)) +- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590)) +- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. ([\#15591](https://github.com/matrix-org/synapse/issues/15591)) +- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592)) +- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594)) + + +Synapse 1.83.0 (2023-05-09) +=========================== + +No significant changes since 1.83.0rc1. + + +Synapse 1.83.0rc1 (2023-05-02) +============================== + +Features +-------- + +- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315)) +- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344)) +- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482)) + + +Bugfixes +-------- + +- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361)) +- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. 
([\#15417](https://github.com/matrix-org/synapse/issues/15417)) +- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494)) + + +Improved Documentation +---------------------- + +- Add Nginx loadbalancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411)) +- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498)) + + +Internal Changes +---------------- + +- Speedup tests by caching HomeServerConfig instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284)) +- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356)) +- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418)) +- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458)) +- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462)) +- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497)) +- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468)) +- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471)) +- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473)) +- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474)) +- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475)) +- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476)) +- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479)) +- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495)) +- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507)) +- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508)) +- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510)) +- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511)) +- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512)) +- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466)) + + +Synapse 1.82.0 (2023-04-25) +=========================== + +No significant changes since 1.82.0rc1. + + +Synapse 1.82.0rc1 (2023-04-18) +============================== + +Features +-------- + +- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. 
([\#15333](https://github.com/matrix-org/synapse/issues/15333)) +- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431)) +- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436)) + + +Bugfixes +-------- + +- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181)) +- Fix and document untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410)) +- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423)) +- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425)) +- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428)) + + +Improved Documentation +---------------------- + +- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452)) + + +Deprecations and Removals +------------------------- + +- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405)) + + +Internal Changes +---------------- + +- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372)) +- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373)) +- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374)) +- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375)) +- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376)) +- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404)) +- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412)) +- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413)) +- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414)) +- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415)) +- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441)) +- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442)) +- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443)) +- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444)) +- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445)) +- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446)) +- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447)) +- Bump psycopg2 from 2.9.5 to 2.9.6. 
([\#15448](https://github.com/matrix-org/synapse/issues/15448)) +- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429)) +- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393)) +- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394)) +- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395)) +- Trust dtonlay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406)) +- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409)) +- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421)) +- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427)) +- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432)) +- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433)) +- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435)) +- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438)) +- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453)) + + +Synapse 1.81.0 (2023-04-11) +=========================== + Synapse now attempts the versioned appservice paths before falling back to the [legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). Usage of the legacy routes should be considered deprecated. @@ -14,6 +241,27 @@ A future version of Synapse (v1.88.0 or later) will remove support for legacy application service routes and query parameter authorization. +No significant changes since 1.81.0rc2. + + +Synapse 1.81.0rc2 (2023-04-06) +============================== + +Bugfixes +-------- + +- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391)) + + +Internal Changes +---------------- + +- Update CI to run complement under the latest stable go version. 
([\#15403](https://github.com/matrix-org/synapse/issues/15403)) + + +Synapse 1.81.0rc1 (2023-04-04) +============================== + Features -------- diff --git a/Cargo.lock b/Cargo.lock index 4a2c6af8f3..e169a665b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arc-swap" @@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal new file mode 100644 index 0000000000..c056e89585 --- /dev/null +++ b/changelog.d/10428.removal @@ -0,0 +1 @@ +Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/changelog.d/15464.bugfix b/changelog.d/15464.bugfix new file mode 100644 index 0000000000..3c655989b3 --- /dev/null +++ b/changelog.d/15464.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/15537.misc b/changelog.d/15537.misc new file mode 100644 index 0000000000..979e0ba977 --- /dev/null +++ b/changelog.d/15537.misc @@ -0,0 +1 @@ +Add not null constraint to column full_user_id of tables profiles and user_filters. diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc new file mode 100644 index 0000000000..a54422239b --- /dev/null +++ b/changelog.d/15578.misc @@ -0,0 +1 @@ +Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix new file mode 100644 index 0000000000..b58af8ad55 --- /dev/null +++ b/changelog.d/15599.bugfix @@ -0,0 +1 @@ +Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/changelog.d/15601.bugfix b/changelog.d/15601.bugfix new file mode 100644 index 0000000000..426db6cea3 --- /dev/null +++ b/changelog.d/15601.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. 
diff --git a/changelog.d/15602.misc b/changelog.d/15602.misc new file mode 100644 index 0000000000..cdd0c039bd --- /dev/null +++ b/changelog.d/15602.misc @@ -0,0 +1 @@ +Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. diff --git a/changelog.d/15604.misc b/changelog.d/15604.misc new file mode 100644 index 0000000000..92d1d600bc --- /dev/null +++ b/changelog.d/15604.misc @@ -0,0 +1 @@ +Fix subscriptable type usage in Python <3.9. diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc new file mode 100644 index 0000000000..568c0d3fc5 --- /dev/null +++ b/changelog.d/15606.misc @@ -0,0 +1 @@ +Update internal terminology. diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix new file mode 100644 index 0000000000..a2767adbe2 --- /dev/null +++ b/changelog.d/15607.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where filters with multiple backslashes were rejected. diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15610.misc @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15611.feature b/changelog.d/15611.feature new file mode 100644 index 0000000000..7cfb46fd0a --- /dev/null +++ b/changelog.d/15611.feature @@ -0,0 +1 @@ +Add a new admin API to create a new device for a user. diff --git a/changelog.d/15613.doc b/changelog.d/15613.doc new file mode 100644 index 0000000000..94733facf0 --- /dev/null +++ b/changelog.d/15613.doc @@ -0,0 +1 @@ +Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. diff --git a/changelog.d/15614.bugfix b/changelog.d/15614.bugfix new file mode 100644 index 0000000000..b523ae6eb1 --- /dev/null +++ b/changelog.d/15614.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. diff --git a/changelog.d/15615.misc b/changelog.d/15615.misc new file mode 100644 index 0000000000..a39fd0a098 --- /dev/null +++ b/changelog.d/15615.misc @@ -0,0 +1 @@ +Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. diff --git a/changelog.d/15620.misc b/changelog.d/15620.misc new file mode 100644 index 0000000000..568c0d3fc5 --- /dev/null +++ b/changelog.d/15620.misc @@ -0,0 +1 @@ +Update internal terminology. diff --git a/changelog.d/15621.misc b/changelog.d/15621.misc new file mode 100644 index 0000000000..5d060f4dbc --- /dev/null +++ b/changelog.d/15621.misc @@ -0,0 +1 @@ +Update Mutual Rooms (MSC2666) implementation to match new proposal text. \ No newline at end of file diff --git a/changelog.d/15624.bugfix b/changelog.d/15624.bugfix new file mode 100644 index 0000000000..fde515ba62 --- /dev/null +++ b/changelog.d/15624.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). diff --git a/changelog.d/15625.misc b/changelog.d/15625.misc new file mode 100644 index 0000000000..7ea8cc9433 --- /dev/null +++ b/changelog.d/15625.misc @@ -0,0 +1 @@ +Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). 
diff --git a/changelog.d/15626.misc b/changelog.d/15626.misc new file mode 100644 index 0000000000..0016cdbf10 --- /dev/null +++ b/changelog.d/15626.misc @@ -0,0 +1 @@ +Fix the olddeps CI. diff --git a/changelog.d/15630.misc b/changelog.d/15630.misc new file mode 100644 index 0000000000..a30304bfd6 --- /dev/null +++ b/changelog.d/15630.misc @@ -0,0 +1 @@ +Fix two memory leaks in `trial` test runs. diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc new file mode 100644 index 0000000000..4126a20602 --- /dev/null +++ b/changelog.d/15633.misc @@ -0,0 +1 @@ +Trace how many new events from the backfill response we need to process. diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix new file mode 100644 index 0000000000..ef39e8a689 --- /dev/null +++ b/changelog.d/15634.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where deactivated users were able to login in uncommon situations. diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc new file mode 100644 index 0000000000..82329c5e43 --- /dev/null +++ b/changelog.d/15636.misc @@ -0,0 +1 @@ +Remove duplicate timestamp from test logs (`_trial_temp/test.log`). diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc new file mode 100644 index 0000000000..92230e206f --- /dev/null +++ b/changelog.d/15639.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.7.0.2 to 67.8.0.0. diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc new file mode 100644 index 0000000000..4c2a3dbc52 --- /dev/null +++ b/changelog.d/15640.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.5.0.2 to 9.5.0.4. diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc new file mode 100644 index 0000000000..a85d85c58e --- /dev/null +++ b/changelog.d/15641.misc @@ -0,0 +1 @@ +Bump sphinx from 6.1.3 to 6.2.1. diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc new file mode 100644 index 0000000000..5d6125140d --- /dev/null +++ b/changelog.d/15642.misc @@ -0,0 +1 @@ +Bump furo from 2023.3.27 to 2023.5.20. diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc new file mode 100644 index 0000000000..5bd2e74071 --- /dev/null +++ b/changelog.d/15643.misc @@ -0,0 +1 @@ +Bump pygithub from 1.58.1 to 1.58.2. diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc new file mode 100644 index 0000000000..872afe30b8 --- /dev/null +++ b/changelog.d/15646.misc @@ -0,0 +1 @@ +Limit the size of the `HomeServerConfig` cache in trial test runs. diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15647.bugfix @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc new file mode 100644 index 0000000000..70f65ebbff --- /dev/null +++ b/changelog.d/15648.doc @@ -0,0 +1 @@ +Remove outdated comment from the generated and sample homeserver log configs. \ No newline at end of file diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc new file mode 100644 index 0000000000..4d7c0248b2 --- /dev/null +++ b/changelog.d/15651.misc @@ -0,0 +1 @@ +Bump requests from 2.28.2 to 2.31.0. diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15658.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md index d3cdfe5614..ebb225fba6 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md @@ -70,6 +70,10 @@ redis: port: 6379 # dbid: <redis_logical_db_id> # password: <secret_password> + # use_tls: True + # certificate_file: <path_to_certificate> + # private_key_file: <path_to_private_key> + # ca_file: <path_to_ca_certificate> ``` This assumes that your Redis service is called `redis` in your Docker Compose file. diff --git a/debian/changelog b/debian/changelog index c3bea01c05..51935e03b6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,51 @@ +matrix-synapse-py3 (1.84.0) stable; urgency=medium + + * New Synapse release 1.84.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 23 May 2023 10:57:22 +0100 + +matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium + + * New Synapse release 1.84.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 16 May 2023 11:12:02 +0100 + +matrix-synapse-py3 (1.83.0) stable; urgency=medium + + * New Synapse release 1.83.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 09 May 2023 18:13:37 +0200 + +matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium + + * New Synapse release 1.83.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 02 May 2023 15:56:38 +0100 + +matrix-synapse-py3 (1.82.0) stable; urgency=medium + + * New Synapse release 1.82.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 25 Apr 2023 11:56:06 +0100 + +matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium + + * New Synapse release 1.82.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 18 Apr 2023 09:47:30 +0100 + +matrix-synapse-py3 (1.81.0) stable; urgency=medium + + * New Synapse release 1.81.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 11 Apr 2023 14:18:35 +0100 + +matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium + + * New Synapse release 1.81.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Thu, 06 Apr 2023 16:07:54 +0100 + matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium * New Synapse release 1.81.0rc1. diff --git a/demo/start.sh b/demo/start.sh index fdd75816fb..06ec6f985f 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -46,7 +46,7 @@ for port in 8080 8081 8082; do echo '' # Warning, this heredoc depends on the interaction of tabs and spaces. - # Please don't accidentaly bork me with your fancy settings. + # Please don't accidentally bork me with your fancy settings. listeners=$(cat <<-PORTLISTENERS # Configure server to listen on both $https_port and $port # This overides some of the default settings above @@ -80,12 +80,8 @@ for port in 8080 8081 8082; do echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\"" echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\"" - # Ignore keys from the trusted keys server - echo '# Ignore keys from the trusted keys server' - echo 'trusted_key_servers:' - echo ' - server_name: "matrix.org"' - echo ' accept_keys_insecurely: true' - echo '' + # Request keys directly from servers contacted over federation + echo 'trusted_key_servers: []' # Allow the servers to communicate over localhost. 
allow_list=$(cat <<-ALLOW_LIST diff --git a/docker/Dockerfile b/docker/Dockerfile index 3d07bcd71f..6107dced43 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -37,7 +37,7 @@ RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && apt-get install -yqq \ - build-essential curl git libffi-dev libssl-dev \ + build-essential curl git libffi-dev libssl-dev pkg-config \ && rm -rf /var/lib/apt/lists/* # Install rust and ensure its in the PATH. diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2 index 42131afc95..44c6e413cf 100644 --- a/docker/conf-workers/worker.yaml.j2 +++ b/docker/conf-workers/worker.yaml.j2 @@ -6,10 +6,6 @@ worker_app: "{{ app }}" worker_name: "{{ name }}" -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_listeners: - type: http port: {{ port }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 2a50ee1e4b..79b5b87397 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -69,6 +69,9 @@ import yaml from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 +MAIN_PROCESS_INSTANCE_NAME = "main" +MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1" +MAIN_PROCESS_REPLICATION_PORT = 9093 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced # during processing with the name of the worker. @@ -173,6 +176,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "^/_matrix/client/(api/v1|r0|v3|unstable)/search", "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)", "^/_matrix/client/(r0|v3|unstable)/password_policy$", + "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$", + "^/_matrix/client/(r0|v3|unstable)/capabilities$", ], "shared_extra_conf": {}, "worker_extra_conf": "", @@ -717,8 +722,8 @@ def generate_worker_files( # shared config file. listeners = [ { - "port": 9093, - "bind_address": "127.0.0.1", + "port": MAIN_PROCESS_REPLICATION_PORT, + "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS, "type": "http", "resources": [{"names": ["replication"]}], } @@ -868,6 +873,14 @@ def generate_worker_files( workers_in_use = len(requested_worker_types) > 0 + # If there are workers, add the main process to the instance_map too. 
+ if workers_in_use: + instance_map = shared_config.setdefault("instance_map", {}) + instance_map[MAIN_PROCESS_INSTANCE_NAME] = { + "host": MAIN_PROCESS_LOCALHOST_ADDRESS, + "port": MAIN_PROCESS_REPLICATION_PORT, + } + # Shared homeserver config convert( "/conf/shared.yaml.j2", diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ade77d4926..a8e5ddad9d 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -57,6 +57,7 @@ - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) - [Event Reports](admin_api/event_reports.md) + - [Experimental Features](admin_api/experimental_features.md) - [Media](admin_api/media_admin_api.md) - [Purge History](admin_api/purge_history_api.md) - [Register Users](admin_api/register_api.md) diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md new file mode 100644 index 0000000000..07b630915d --- /dev/null +++ b/docs/admin_api/experimental_features.md @@ -0,0 +1,55 @@ +# Experimental Features API + +This API allows a server administrator to enable or disable some experimental features on a per-user +basis. The currently supported features are: +- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy +presence state enabled +- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications +for another client +- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require +UIA when first uploading cross-signing keys. + + +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api/). + +## Enabling/Disabling Features + +This API allows a server administrator to enable experimental features for a given user. The request must +provide a body containing the user id and listing the features to enable/disable in the following format: +```json +{ + "features": { + "msc3026":true, + "msc3881":true + } +} +``` +where true is used to enable the feature, and false is used to disable the feature. + + +The API is: + +``` +PUT /_synapse/admin/v1/experimental_features/<user_id> +``` + +## Listing Enabled Features + +To list which features are enabled/disabled for a given user send a request to the following API: + +``` +GET /_synapse/admin/v1/experimental_features/<user_id> +``` + +It will return a list of possible features and indicate whether they are enabled or disabled for the +user like so: +```json +{ + "features": { + "msc3026": true, + "msc3881": false, + "msc3967": false + } +} +``` \ No newline at end of file diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md index 03b3621e55..59f07311eb 100644 --- a/docs/admin_api/statistics.md +++ b/docs/admin_api/statistics.md @@ -81,3 +81,52 @@ The following fields are returned in the JSON response body: - `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`). * `next_token` - integer - Opaque value used for pagination. See above. * `total` - integer - Total number of users after filtering. + + +# Get largest rooms by size in database + +Returns the 10 largest rooms and an estimate of how much space in the database +they are taking. + +This does not include the size of any associated media associated with the room. + +Returns an error on SQLite. + +*Note:* This uses the planner statistics from PostgreSQL to do the estimates, +which means that the returned information can vary widely from reality. 
However, +it should be enough to get a rough idea of where database disk space is going. + + +The API is: + +``` +GET /_synapse/admin/v1/statistics/database/rooms +``` + +A response body like the following is returned: + +```json +{ + "rooms": [ + { + "room_id": "!OGEhHVWSdvArJzumhm:matrix.org", + "estimated_size": 47325417353 + } + ], +} +``` + + + +**Response** + +The following fields are returned in the JSON response body: + +* `rooms` - An array of objects, sorted by largest room first. Objects contain + the following fields: + - `room_id` - string - The room ID. + - `estimated_size` - integer - Estimated disk space used in bytes by the room + in the database. + + +*Added in Synapse 1.83.0* diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 86c29ab380..229942b311 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -62,7 +62,7 @@ URL parameters: - `user_id`: fully-qualified user id: for example, `@user:server.com`. -## Create or modify Account +## Create or modify account This API allows an administrator to create or modify a user account with a specific `user_id`. @@ -78,28 +78,29 @@ with a body of: ```json { "password": "user_password", - "displayname": "User", + "logout_devices": false, + "displayname": "Alice Marigold", + "avatar_url": "mxc://example.com/abcde12345", "threepids": [ { "medium": "email", - "address": "<user_mail_1>" + "address": "alice@example.com" }, { "medium": "email", - "address": "<user_mail_2>" + "address": "alice@domain.org" } ], "external_ids": [ { - "auth_provider": "<provider1>", - "external_id": "<user_id_provider_1>" + "auth_provider": "example", + "external_id": "12345" }, { - "auth_provider": "<provider2>", - "external_id": "<user_id_provider_2>" + "auth_provider": "example2", + "external_id": "abc54321" } ], - "avatar_url": "<avatar_url>", "admin": false, "deactivated": false, "user_type": null @@ -112,41 +113,51 @@ Returns HTTP status code: URL parameters: -- `user_id`: fully-qualified user id: for example, `@user:server.com`. +- `user_id` - A fully-qualified user id. For example, `@user:server.com`. Body parameters: -- `password` - string, optional. If provided, the user's password is updated and all +- `password` - **string**, optional. If provided, the user's password is updated and all devices are logged out, unless `logout_devices` is set to `false`. -- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't +- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't logged out even when `password` is provided. -- `displayname` - string, optional, defaults to the value of `user_id`. -- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn) - - `medium` - string. Kind of third-party ID, either `email` or `msisdn`. - - `address` - string. Value of third-party ID. - belonging to a user. -- `external_ids` - array, optional. Allow setting the identifier of the external identity - provider for SSO (Single sign-on). Details in the configuration manual under the - sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers). - - `auth_provider` - string. ID of the external identity provider. Value of `idp_id` - in the homeserver configuration. Note that no error is raised if the provided - value is not in the homeserver configuration. - - `external_id` - string, user ID in the external identity provider. 
+- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
+  will be removed.
+- `avatar_url` - **string**, optional. Must be a
   [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
-- `admin` - bool, optional, defaults to `false`.
-- `deactivated` - bool, optional. If unspecified, deactivation state will be left
-  unchanged on existing accounts and set to `false` for new accounts.
-  A user cannot be erased by deactivating with this API. For details on
-  deactivating users see [Deactivate Account](#deactivate-account).
-- `user_type` - string or null, optional. If provided, the user type will be
-  adjusted. If `null` given, the user type will be cleared. Other
-  allowed options are: `bot` and `support`.
-
-If the user already exists then optional parameters default to the current value.
-
-In order to re-activate an account `deactivated` must be set to `false`. If
-users do not login via single-sign-on, a new `password` must be provided.
+  If set to an empty string (`""`), the user's avatar is removed.
+- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
+  entirely replaced with the given list. Each item in the array is an object with the following
+  fields:
+  - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
+  - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
+    `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
+    with country code "1") for `msisdn`.
+  Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
+  that threepid from any identity servers that it knows have a binding for it.
+- `external_ids` - **array**, optional. Allows setting the identifier of the external identity
+  provider for SSO (Single sign-on). More details are in the configuration manual under the
+  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
+  - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
+    The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
+    provided value is not in the homeserver configuration.
+  - `external_id` - **string**, required. An identifier for the user in the external identity provider.
+    When the user logs in to the identity provider, this must be the unique ID that they map to.
+- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
+  granting them access to the Admin API, among other things.
+- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
+
+  Note: the `password` field must also be set if both of the following are true:
+  - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
+  - Users are allowed to set their password on this homeserver (both `password_config.enabled` and
+    `password_config.localdb_enabled` config options are set to `true`).
+
+  Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
+
+  Note: a user cannot be erased with this API. For more details on
+  deactivating and erasing users, see [Deactivate Account](#deactivate-account).
+- `user_type` - **string** or null, optional. If not provided, the user type will
+  not be changed. If `null` is given, the user type will be cleared.
+  Other allowed options are: `bot` and `support`.

 ## List Accounts
@@ -802,6 +813,33 @@ The following fields are returned in the JSON response body:

 - `total` - Total number of user's devices.

+### Create a device
+
+Creates a new device for a specific `user_id` and `device_id`. Does nothing if the `device_id`
+exists already.
+
+The API is:
+
+```
+POST /_synapse/admin/v2/users/<user_id>/devices
+
+{
+  "device_id": "QBUAZIFURK"
+}
+```
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+The following fields are required in the JSON request body:
+
+- `device_id` - The device ID to create.
+
 ### Delete multiple devices
 Deletes the given devices for a specific `user_id`, and invalidates any access token
 associated with them.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 925dcd8933..56cf4ba81e 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -346,6 +346,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
   A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
   See the [worker documentation](../workers.md) for additional information on workers.
 - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
+- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime instead of Docker.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index 29945c264e..e231be21dd 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -155,43 +155,11 @@ def run_upgrade(
 Boolean columns require special treatment, since SQLite treats booleans the
 same as integers.

-There are three separate aspects to this:
-
- * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
+Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
   `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
   the integer value from SQLite to a boolean before writing the value to the
   postgres database.

- * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
-   SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
-   supported. This makes it necessary to avoid using `TRUE` and `FALSE`
-   constants in SQL commands.
-
-   For example, to insert a `TRUE` value into the database, write:
-
-   ```python
-   txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
-   ```
-
- * Default values for new boolean columns present a particular
-   difficulty. Generally it is best to create separate schema files for
-   Postgres and SQLite. For example:
-
-   ```sql
-   # in 00delta.sql.postgres:
-   ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
-   ```
-
-   ```sql
-   # in 00delta.sql.sqlite:
-   ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
-   ```
-
-   Note that there is a particularly insidious failure mode here: the Postgres
-   flavour will be accepted by SQLite 3.22, but will give a column whose
-   default value is the **string** `"FALSE"` - which, when cast back to a boolean
-   in Python, evaluates to `True`.
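To make the remaining requirement concrete, here is a minimal sketch of the cast the port script performs on such columns. It is illustrative only: the real logic (and the full `BOOLEAN_COLUMNS` mapping) lives in `synapse/_scripts/synapse_port_db.py`, and the table and column names below are hypothetical.

```python
from typing import Any, Dict, List

# Hypothetical excerpt; the real BOOLEAN_COLUMNS in
# synapse/_scripts/synapse_port_db.py maps table names to the
# columns whose SQLite integer values must become booleans.
BOOLEAN_COLUMNS: Dict[str, List[str]] = {
    "tbl": ["col"],
}


def cast_bools(table: str, headers: List[str], row: List[Any]) -> List[Any]:
    """Cast SQLite's 0/1 integers to Python booleans for the listed
    columns, leaving NULLs (None) and all other values untouched."""
    bool_cols = set(BOOLEAN_COLUMNS.get(table, ()))
    return [
        bool(value) if header in bool_cols and value is not None else value
        for header, value in zip(headers, row)
    ]


# For example: cast_bools("tbl", ["id", "col"], [42, 1]) -> [42, True]
```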
-
 ## `event_id` global uniqueness
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index f6349d5404..d66ac7df31 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -46,6 +46,9 @@ instead.

 If the authentication is unsuccessful, the module must return `None`.

+Note that the user is not automatically registered; the `register_user(..)` method of
+the [module API](writing_a_module.html) can be used to lazily create users.
+
 If multiple modules register an auth checker for the same login type but with different
 fields, Synapse will refuse to start.

@@ -103,6 +106,9 @@ Called during a logout request for a user. It is passed the qualified user ID, t
 deactivated device (if any: access tokens are occasionally created without an associated
 device ID), and the (now deactivated) access token.

+Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
+to still be present.
+
 If multiple modules implement this callback, Synapse runs them all in order.

 ### `get_username_for_registration`
diff --git a/docs/openid.md b/docs/openid.md
index 73f1e06121..9773a7de52 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it.
 {"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
 ```

-As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
+As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:

 ```yaml
 oidc_providers:
@@ -585,7 +585,9 @@ oidc_providers:
     scopes: ["read"]
     user_mapping_provider:
       config:
-        subject_claim: "id"
+        subject_template: "{{ user.id }}"
+        localpart_template: "{{ user.username }}"
+        display_name_template: "{{ user.display_name }}"
 ```

 Note that the fields `client_id` and `client_secret` are taken from the curl response above.
diff --git a/docs/replication.md b/docs/replication.md
index 108da9a065..25145daaf5 100644
--- a/docs/replication.md
+++ b/docs/replication.md
@@ -30,12 +30,6 @@ minimal.

 See [the TCP replication documentation](tcp_replication.md).

-### The Slaved DataStore
-
-There are read-only version of the synapse storage layer in
-`synapse/replication/slave/storage` that use the response of the
-replication API to invalidate their caches.
-
 ### The TCP Replication Module
 Information about how the tcp replication module is structured, including how
 the classes interact, can be found in
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 6339160d00..ae0318122e 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -68,9 +68,7 @@ root:
     # Write logs to the `buffer` handler, which will buffer them together in memory,
     # then write them to a file.
     #
-    # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
-    # also need to update the configuration for the `twisted` logger above, in
-    # this case.)
+    # Replace "buffer" with "console" to log to stderr instead.
     #
     handlers: [buffer]
diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml
index a858f99ed1..db6436ee6e 100644
--- a/docs/systemd-with-workers/workers/generic_worker.yaml
+++ b/docs/systemd-with-workers/workers/generic_worker.yaml
@@ -1,10 +1,6 @@
 worker_app: synapse.app.generic_worker
 worker_name: generic_worker1

-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
 worker_listeners:
   - type: http
     port: 8083
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 0886b03115..af999dd91f 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,95 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.84.0
+
+## Deprecation of `worker_replication_*` configuration settings
+
+When using workers,
+
+* `worker_replication_host`
+* `worker_replication_http_port`
+* `worker_replication_http_tls`
+
+should now be removed from individual worker YAML configurations, and the main process should instead be added to the `instance_map`
+in the shared YAML configuration, using the name `main`.
+
+The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0.
+
+
+### Example change
+
+#### Before:
+
+Shared YAML
+```yaml
+instance_map:
+  generic_worker1:
+    host: localhost
+    port: 5678
+    tls: false
+```
+
+Worker YAML
+```yaml
+worker_app: synapse.app.generic_worker
+worker_name: generic_worker1
+
+worker_replication_host: localhost
+worker_replication_http_port: 3456
+worker_replication_http_tls: false
+
+worker_listeners:
+  - type: http
+    port: 1234
+    resources:
+      - names: [client, federation]
+  - type: http
+    port: 5678
+    resources:
+      - names: [replication]
+
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+
+
+#### After:
+
+Shared YAML
+```yaml
+instance_map:
+  main:
+    host: localhost
+    port: 3456
+    tls: false
+  generic_worker1:
+    host: localhost
+    port: 5678
+    tls: false
+```
+
+Worker YAML
+```yaml
+worker_app: synapse.app.generic_worker
+worker_name: generic_worker1
+
+worker_listeners:
+  - type: http
+    port: 1234
+    resources:
+      - names: [client, federation]
+  - type: http
+    port: 5678
+    resources:
+      - names: [replication]
+
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+
+Notes:
+* `tls` is optional but mirrors the functionality of `worker_replication_http_tls`.
+
+
 # Upgrading to v1.81.0

 ## Application service path & authentication deprecations
diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
index 3a7ed7c806..60b758e33b 100644
--- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
+++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
@@ -42,11 +42,6 @@ The following statistics are sent to the configured reporting endpoint:
 | `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
 | `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
 | `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
-| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
-| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
-| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
-| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
-| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
 | `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
 | `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
 | `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index c5c2c2b615..93b132b6e4 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -577,6 +577,10 @@ delete any device that hasn't been accessed for more than the specified amount o

 Defaults to no duration, which means devices are never pruned.

+**Note:** This task will always run on the main process, regardless of the value of
+`run_background_tasks_on`. This is because workers do not currently have the ability
+to delete devices.
+
 Example configuration:
 ```yaml
 delete_stale_devices_after: 1y
@@ -1764,6 +1768,30 @@ Example configuration:
 max_image_pixels: 35M
 ```
 ---
+### `prevent_media_downloads_from`
+
+A list of domains to never download media from. Media from these
+domains that is already downloaded will not be deleted, but will be
+inaccessible to users. This option does not affect admin APIs trying
+to download/operate on media.
+
+This will not prevent the listed domains from accessing media themselves.
+It simply prevents users on this server from downloading media originating
+from the listed servers.
+
+This will have no effect on media originating from the local server. This
+only affects media downloaded from other Matrix servers; to block domains
+from URL previews, see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+
+Defaults to an empty list (nothing blocked).
+
+Example configuration:
+```yaml
+prevent_media_downloads_from:
+  - evil.example.org
+  - evil2.example.org
+```
+---
 ### `dynamic_thumbnails`

 Whether to generate new thumbnails on the fly to precisely match
@@ -3438,6 +3466,9 @@ This option has a number of sub-options. They are as follows:
   user has unread messages in. Defaults to true, meaning push clients will see the number of
   rooms with unread messages in them.
  Set to false to instead send the number of unread messages.
+* `jitter_delay`: Delays push notifications by a random amount up to the given
+  duration. Useful for mitigating timing attacks. Optional, defaults to no
+  delay. _Added in Synapse 1.84.0._

 Example configuration:
 ```yaml
@@ -3445,6 +3476,7 @@ push:
   enabled: true
   include_content: false
   group_unread_count_by_room: false
+  jitter_delay: "10s"
 ```
 ---
 ## Rooms
@@ -3691,6 +3723,16 @@ default_power_level_content_override:
   trusted_private_chat: null
   public_chat: null
 ```
+---
+### `forget_rooms_on_leave`
+
+Set to true to automatically forget rooms for users when they leave them, either
+normally or via a kick or ban. Defaults to false.
+
+Example configuration:
+```yaml
+forget_rooms_on_leave: false
+```
 ---
 ## Opentracing
@@ -3842,15 +3884,20 @@ federation_sender_instances:
 ### `instance_map`

 When using workers this should be a map from [`worker_name`](#worker_name) to the
-HTTP replication listener of the worker, if configured.
+HTTP replication listener of the worker, if configured, and to the main process.
 Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
 a HTTP replication listener, and that listener should be included in the `instance_map`.
-(The main process also needs an HTTP replication listener, but it should not be
-listed in the `instance_map`.)
+The main process also needs an entry in the `instance_map`, and it should be listed under
+`main` **if even one other worker exists**. Ensure the port matches what is declared
+inside the `listeners` block for a `replication` listener.
+
 Example configuration:
 ```yaml
 instance_map:
+  main:
+    host: localhost
+    port: 8030
   worker1:
     host: localhost
     port: 8034
@@ -3934,9 +3981,16 @@ This setting has the following sub-options:
   localhost and 6379
 * `password`: Optional password if configured on the Redis instance.
 * `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
+* `use_tls`: Whether to use a TLS connection. Defaults to false.
+* `certificate_file`: Optional path to the certificate file.
+* `private_key_file`: Optional path to the private key file.
+* `ca_file`: Optional path to the CA certificate file. Use either this option or:
+* `ca_path`: Optional path to the folder containing the CA certificate file.

 _Added in Synapse 1.78.0._

+_Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_
+
 Example configuration:
 ```yaml
 redis:
@@ -3945,6 +3999,10 @@ redis:
   port: 6379
   password: <secret_password>
   dbid: <dbid>
+  #use_tls: True
+  #certificate_file: <path_to_the_certificate_file>
+  #private_key_file: <path_to_the_private_key_file>
+  #ca_file: <path_to_the_ca_certificate_file>
 ```
 ---
 ## Individual worker configuration
@@ -3982,6 +4040,7 @@ worker_name: generic_worker1
 ```
 ---
 ### `worker_replication_host`
+*Deprecated as of version 1.84.0. Place `host` under the `main` entry in the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

 The HTTP replication endpoint that it should talk to on the main Synapse process.
 The main Synapse process defines this with a `replication` resource in
@@ -3993,6 +4052,7 @@ worker_replication_host: 127.0.0.1
 ```
 ---
 ### `worker_replication_http_port`
+*Deprecated as of version 1.84.0. Place `port` under the `main` entry in the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

 The HTTP replication port that it should talk to on the main Synapse process.
 The main Synapse process defines this with a `replication` resource in
@@ -4004,6 +4064,7 @@ worker_replication_http_port: 9093
 ```
 ---
 ### `worker_replication_http_tls`
+*Deprecated as of version 1.84.0. Place `tls` under the `main` entry in the [`instance_map`](#instance_map) in your shared yaml configuration instead.*

 Whether TLS should be used for talking to the HTTP replication port on the main
 Synapse process.
@@ -4029,9 +4090,9 @@ A worker can handle HTTP requests. To do so, a `worker_listeners` option
 must be declared, in the same way as the [`listeners` option](#listeners) in the shared config.

-Workers declared in [`stream_writers`](#stream_writers) will need to include a
-`replication` listener here, in order to accept internal HTTP requests from
-other workers.
+Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map)
+will need to include a `replication` listener here, in order to accept internal HTTP
+requests from other workers.

 Example configuration:
 ```yaml
diff --git a/docs/workers.md b/docs/workers.md
index e9a477d32c..991814c0bc 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -87,12 +87,18 @@ shared configuration file.

 ### Shared configuration

-Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an
+Normally, only a few changes are needed to make an existing configuration
+file suitable for use with workers:
+* First, you need to enable an
 ["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
-for the main process; and secondly, you need to enable
-[redis-based replication](usage/configuration/config_documentation.md#redis).
-Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+for the main process
+* Secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis)
+* Thirdly, you will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+with the `main` process defined, as well as the relevant connection information from
+its HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
+is the address the worker needs to look for the `main` process at, not necessarily the same address it is bound to.
+* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
 can be used to authenticate HTTP traffic between workers. For example:

 ```yaml
@@ -111,6 +117,11 @@ worker_replication_secret: ""

 redis:
     enabled: true
+
+instance_map:
+    main:
+        host: 'localhost'
+        port: 9093
 ```

 See the [configuration manual](usage/configuration/config_documentation.md)
@@ -130,13 +141,13 @@ In the config file for each worker, you must specify:
  * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
    The currently available worker applications are listed [below](#available-worker-applications).
  * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
- * The HTTP replication endpoint that it should talk to on the main synapse process
-   ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
-   [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
 * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
   with an `http` listener.
 * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for the main
   process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
+ * **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
+   ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+   [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined in the [shared configuration](#shared-configuration) `instance_map`.

 For example:

@@ -234,6 +245,8 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
     ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
     ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
+    ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
+    ^/_matrix/client/(r0|v3|unstable)/capabilities$

     # Encryption requests
     ^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -323,8 +336,7 @@ load balancing can be done in different ways.

 For `/sync` and `/initialSync` requests it will be more efficient if all
 requests from a particular user are routed to a single instance. This can
-be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
-`hash $http_authorization consistent;` which contains the users access token.
+be done in the reverse proxy by extracting the username part from the user's access token.

 Admins may additionally wish to separate out `/sync`
 requests that have a `since` query parameter from those that don't (and
@@ -333,6 +345,69 @@ when a user logs in on a new device and can be *very* resource intensive, so
 isolating these requests will stop them from interfering with other users'
 ongoing syncs.

+Here is an example `nginx` configuration snippet that handles the cases above. It is just an
+example and probably requires some changes according to your particular setup:
+
+```nginx
+# Choose the sync worker based on the existence of the "since" query parameter
+map $arg_since $sync {
+    default synapse_sync;
+    '' synapse_initial_sync;
+}
+
+# Extract the username from an access token passed as a URL parameter
+map $arg_access_token $accesstoken_from_urlparam {
+    # Defaults to just passing back the whole access token
+    default $arg_access_token;
+    # Try to extract the username part from the access token URL parameter
+    "~syt_(?<username>.*?)_.*" $username;
+}
+
+# Extract the username from an access token passed as an authorization header
+map $http_authorization $mxid_localpart {
+    # Defaults to just passing back the whole access token
+    default $http_authorization;
+    # Try to extract the username part from the access token header
+    "~Bearer syt_(?<username>.*?)_.*" $username;
+    # If no authorization header exists, try the mapper for the URL parameter "access_token"
+    "" $accesstoken_from_urlparam;
+}
+
+upstream synapse_initial_sync {
+    # Use the username mapper result for the hash key
+    hash $mxid_localpart consistent;
+    server 127.0.0.1:8016;
+    server 127.0.0.1:8036;
+}
+
+upstream synapse_sync {
+    # Use the username mapper result for the hash key
+    hash $mxid_localpart consistent;
+    server 127.0.0.1:8013;
+    server 127.0.0.1:8037;
+    server 127.0.0.1:8038;
+    server 127.0.0.1:8039;
+}
+
+# Sync initial/normal
+location ~ ^/_matrix/client/(r0|v3)/sync$ {
+    proxy_pass http://$sync;
+}
+
+# Normal sync
+location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ {
+    proxy_pass http://synapse_sync;
+}
+
+# Initial sync
+location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ {
+    proxy_pass http://synapse_initial_sync;
+}
+location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ {
+    proxy_pass http://synapse_initial_sync;
+}
+```
+
 Federation and client requests can be balanced via simple round robin.

 The inbound federation transaction request `^/_matrix/federation/v1/send/`
@@ -353,11 +428,14 @@ effects of bursts of events from that bridge on events sent by normal users.

 Additionally, the writing of specific streams (such as events) can be moved off
 of the main process to a particular worker.

-To enable this, the worker must have a
-[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
-have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+To enable this, the worker must:
+* have an [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
+* have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
 and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
-config. The same worker can handle multiple streams, but unless otherwise documented,
+config, and
+* have the main process declared in the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well.
+
+Note: The same worker can handle multiple streams, but unless otherwise documented,
 each stream can only have a single writer.
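Concretely, the worker side of a stream writer could look like the following sketch. The worker name, port and log-config path are illustrative, and the `replication` listener's port must line up with the worker's entry in the `instance_map`, as in the shared-configuration example below:

```yaml
worker_app: synapse.app.generic_worker
worker_name: event_persister1

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]

worker_log_config: /etc/matrix-synapse/event-persister-log.yaml
```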
For example, to move event persistence off to a dedicated worker, the shared @@ -365,6 +443,9 @@ configuration would include: ```yaml instance_map: + main: + host: localhost + port: 8030 event_persister1: host: localhost port: 8034 diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000000..d1c933e9aa --- /dev/null +++ b/flake.lock @@ -0,0 +1,274 @@ +{ + "nodes": { + "devenv": { + "inputs": { + "flake-compat": "flake-compat", + "nix": "nix", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1683102061, + "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=", + "owner": "cachix", + "repo": "devenv", + "rev": "ff1f29e41756553174d596cafe3a9fa77595100b", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "main", + "repo": "devenv", + "type": "github" + } + }, + "fenix": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-analyzer-src": "rust-analyzer-src" + }, + "locked": { + "lastModified": 1682490133, + "narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=", + "owner": "nix-community", + "repo": "fenix", + "rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-utils": { + "locked": { + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "lowdown-src": { + "flake": false, + "locked": { + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "owner": "kristapsdz", + "repo": "lowdown", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "type": "github" + }, + "original": { + "owner": "kristapsdz", + "repo": "lowdown", + "type": "github" + } + }, + "nix": { + "inputs": { + "lowdown-src": "lowdown-src", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1676545802, + "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "owner": "domenkozar", + "repo": "nix", + "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "relaxed-flakes", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1678875422, + "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "type": "github" + }, 
+ "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1673800717, + "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-22.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1682519441, + "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "7a32a141db568abde9bc389845949dc2a454dfd3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "master", + "repo": "nixpkgs", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1678376203, + "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "fenix": "fenix", + "nixpkgs": "nixpkgs_2", + "systems": "systems" + } + }, + "rust-analyzer-src": { + "flake": false, + "locked": { + "lastModified": 1682426789, + "narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=", + "owner": "rust-lang", + "repo": "rust-analyzer", + "rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04", + "type": "github" + }, + "original": { + "owner": "rust-lang", + "ref": "nightly", + "repo": "rust-analyzer", + "type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000000..8d2bf779bd --- /dev/null +++ b/flake.nix @@ -0,0 +1,227 @@ +# A Nix flake that sets up a complete Synapse development environment. Dependencies +# for the SyTest (https://github.com/matrix-org/sytest) and Complement +# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also +# installed automatically. +# +# You must have already installed Nix (https://nixos.org) on your system to use this. +# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not +# directly supported, but Nix can be installed inside of WSL2 or even Docker +# containers. Please refer to https://nixos.org/download for details. +# +# You must also enable support for flakes in Nix. 
See the following for how to
+# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
+#
+# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
+#
+# Usage:
+#
+# With Nix installed, navigate to the directory containing this flake and run
+# `nix develop --impure`. The `--impure` is necessary in order to store state
+# locally from "services", such as PostgreSQL and Redis.
+#
+# You should now be dropped into a new shell with all programs and dependencies
+# available to you!
+#
+# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
+# running: `devenv up`. To stop them, use Ctrl-C.
+#
+# All state (the venv, postgres and redis data and config) is stored in
+# .devenv/state. Deleting a file from here and then re-entering the shell
+# will recreate these files from scratch.
+#
+# You can exit the development shell by typing `exit`, or using Ctrl-D.
+#
+# If you would like this development environment to activate automatically
+# upon entering this directory in your terminal, first install `direnv`
+# (https://direnv.net/). Then run `echo 'use flake . --impure' >> .envrc` at
+# the root of the Synapse repo. Finally, run `direnv allow .` to allow the
+# contents of '.envrc' to run every time you enter this directory. Voilà!
+
+{
+  inputs = {
+    # Use the master/unstable branch of nixpkgs. The latest stable, 22.11,
+    # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest.
+    nixpkgs.url = "github:NixOS/nixpkgs/master";
+    # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
+    systems.url = "github:nix-systems/default";
+    # A development environment manager built on Nix. See https://devenv.sh.
+    devenv.url = "github:cachix/devenv/main";
+    # Rust toolchains and rust-analyzer nightly.
+    fenix = {
+      url = "github:nix-community/fenix";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
+  };
+
+  outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
+    let
+      forEachSystem = nixpkgs.lib.genAttrs (import systems);
+    in {
+      devShells = forEachSystem (system:
+        let
+          pkgs = nixpkgs.legacyPackages.${system};
+        in {
+          # Everything is configured via devenv - a Nix module for creating declarative
+          # developer environments. See https://devenv.sh/reference/options/ for a list
+          # of all possible options.
+          default = devenv.lib.mkShell {
+            inherit inputs pkgs;
+            modules = [
+              {
+                # Make use of the Starship command prompt when this development environment
+                # is manually activated (via `nix develop --impure`).
+                # See https://starship.rs/ for details on the prompt itself.
+                starship.enable = true;
+
+                # Configure packages to install.
+                # Search for package names at https://search.nixos.org/packages?channel=unstable
+                packages = with pkgs; [
+                  # Native dependencies for running Synapse.
+                  icu
+                  libffi
+                  libjpeg
+                  libpqxx
+                  libwebp
+                  libxml2
+                  libxslt
+                  sqlite
+
+                  # Native dependencies for unit tests (SyTest also requires OpenSSL).
+                  openssl
+                  xmlsec
+
+                  # Native dependencies for running Complement.
+                  olm
+
+                  # For building the Synapse documentation website.
+                  mdbook
+                ];
+
+                # Install Python and manage a virtualenv with Poetry.
+                languages.python.enable = true;
+                languages.python.poetry.enable = true;
+                # Automatically activate the poetry virtualenv upon entering the shell.
+                languages.python.poetry.activate.enable = true;
+                # Install all extra Python dependencies; this is needed to run the unit
+                # tests and utilise all Synapse features.
+ languages.python.poetry.install.arguments = ["--extras all"]; + # Install the 'matrix-synapse' package from the local checkout. + languages.python.poetry.install.installRootPackage = true; + + # This is a work-around for NixOS systems. NixOS is special in + # that you can have multiple versions of packages installed at + # once, including your libc linker! + # + # Some binaries built for Linux expect those to be in a certain + # filepath, but that is not the case on NixOS. In that case, we + # force compiling those binaries locally instead. + env.POETRY_INSTALLER_NO_BINARY = "ruff"; + + # Install dependencies for the additional programming languages + # involved with Synapse development. + # + # * Rust is used for developing and running Synapse. + # * Golang is needed to run the Complement test suite. + # * Perl is needed to run the SyTest test suite. + languages.go.enable = true; + languages.rust.enable = true; + languages.rust.version = "stable"; + languages.perl.enable = true; + + # Postgres is needed to run Synapse with postgres support and + # to run certain unit tests that require postgres. + services.postgres.enable = true; + + # On the first invocation of `devenv up`, create a database for + # Synapse to store data in. + services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"]; + services.postgres.initialDatabases = [ + { name = "synapse"; } + ]; + # Create a postgres user called 'synapse_user' which has ownership + # over the 'synapse' database. + services.postgres.initialScript = '' + CREATE USER synapse_user; + ALTER DATABASE synapse OWNER TO synapse_user; + ''; + + # Redis is needed in order to run Synapse in worker mode. + services.redis.enable = true; + + # Configure and start Synapse. Before starting Synapse, this shell code: + # * generates a default homeserver.yaml config file if one does not exist, and + # * ensures a directory containing two additional homeserver config files exists; + # one to configure using the development environment's PostgreSQL as the + # database backend and another for enabling Redis support. + process.before = '' + python -m synapse.app.homeserver -c homeserver.yaml --generate-config --server-name=synapse.dev --report-stats=no + mkdir -p homeserver-config-overrides.d + cat > homeserver-config-overrides.d/database.yaml << EOF + ## Do not edit this file. This file is generated by flake.nix + database: + name: psycopg2 + args: + user: synapse_user + database: synapse + host: $PGHOST + cp_min: 5 + cp_max: 10 + EOF + cat > homeserver-config-overrides.d/redis.yaml << EOF + ## Do not edit this file. This file is generated by flake.nix + redis: + enabled: true + EOF + ''; + # Start synapse when `devenv up` is run. + processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d"; + + # Define the perl modules we require to run SyTest. + # + # This list was compiled by cross-referencing https://metacpan.org/ + # with the modules defined in './cpanfile' and then finding the + # corresponding Nix packages on https://search.nixos.org/packages. + # + # This was done until `./install-deps.pl --dryrun` produced no output. 
+ env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [ + DBI + ClassMethodModifiers + CryptEd25519 + DataDump + DBDPg + DigestHMAC + DigestSHA1 + EmailAddressXS + EmailMIME + EmailSimple # required by Email::Mime + EmailMessageID # required by Email::Mime + EmailMIMEContentType # required by Email::Mime + TextUnidecode # required by Email::Mime + ModuleRuntime # required by Email::Mime + EmailMIMEEncodings # required by Email::Mime + FilePath + FileSlurper + Future + GetoptLong + HTTPMessage + IOAsync + IOAsyncSSL + IOSocketSSL + NetSSLeay + JSON + ListUtilsBy + ScalarListUtils + ModulePluggable + NetAsyncHTTP + MetricsAny # required by Net::Async::HTTP + NetAsyncHTTPServer + StructDumb + URI + YAMLLibYAML + ]}"; + } + ]; + }; + }); + }; +} diff --git a/mypy.ini b/mypy.ini index 945f7925cb..3363c6daee 100644 --- a/mypy.ini +++ b/mypy.ini @@ -13,6 +13,9 @@ no_implicit_optional = True disallow_untyped_defs = True strict_equality = True warn_redundant_casts = True +# Run mypy type checking with the minimum supported Python version to catch new usage +# that isn't backwards-compatible (types, overloads, etc). +python_version = 3.8 files = docker/, @@ -21,26 +24,7 @@ files = tests/, build_rust.py -# Note: Better exclusion syntax coming in mypy > 0.910 -# https://github.com/python/mypy/pull/11329 -# -# For now, set the (?x) flag enable "verbose" regexes -# https://docs.python.org/3/library/re.html#re.X -exclude = (?x) - ^( - |synapse/storage/databases/__init__.py - |synapse/storage/databases/main/cache.py - |synapse/storage/schema/ - )$ - -[mypy-synapse.federation.transport.client] -disallow_untyped_defs = False - -[mypy-synapse.http.matrixfederationclient] -disallow_untyped_defs = False - [mypy-synapse.metrics._reactor_metrics] -disallow_untyped_defs = False # This module imports select.epoll. That exists on Linux, but doesn't on macOS. # See https://github.com/matrix-org/synapse/pull/11771. warn_unused_ignores = False diff --git a/poetry.lock b/poetry.lock index 978a6e1598..3f8bf7c304 100644 --- a/poetry.lock +++ b/poetry.lock @@ -156,37 +156,37 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.1.0" +version = "23.3.0" description = "The uncompromising code formatter." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, - {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, - {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, - {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, - {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, - {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, - {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, - {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, - {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, - {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, - {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, - {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, - {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, - {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, - {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, - {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, - {file = "black-23.1.0-py3-none-any.whl", hash = 
"sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, - {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, + {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, + {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, + {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, + {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, + {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, + {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, + {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, + {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, + {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, + {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, + {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, + {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, + {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, ] [package.dependencies] @@ -481,31 +481,31 @@ files = [ [[package]] name = "cryptography" -version = "40.0.1" +version = "40.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "cryptography-40.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:918cb89086c7d98b1b86b9fdb70c712e5a9325ba6f7d7cfb509e784e0cfc6917"}, - {file = "cryptography-40.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:9618a87212cb5200500e304e43691111570e1f10ec3f35569fdfcd17e28fd797"}, - {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4805a4ca729d65570a1b7cac84eac1e431085d40387b7d3bbaa47e39890b88"}, - {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63dac2d25c47f12a7b8aa60e528bfb3c51c5a6c5a9f7c86987909c6c79765554"}, - {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0a4e3406cfed6b1f6d6e87ed243363652b2586b2d917b0609ca4f97072994405"}, - {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1e0af458515d5e4028aad75f3bb3fe7a31e46ad920648cd59b64d3da842e4356"}, - {file = "cryptography-40.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d8aa3609d337ad85e4eb9bb0f8bcf6e4409bfb86e706efa9a027912169e89122"}, - {file = "cryptography-40.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cf91e428c51ef692b82ce786583e214f58392399cf65c341bc7301d096fa3ba2"}, - {file = "cryptography-40.0.1-cp36-abi3-win32.whl", hash = "sha256:650883cc064297ef3676b1db1b7b1df6081794c4ada96fa457253c4cc40f97db"}, - {file = "cryptography-40.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:a805a7bce4a77d51696410005b3e85ae2839bad9aa38894afc0aa99d8e0c3160"}, - {file = "cryptography-40.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd033d74067d8928ef00a6b1327c8ea0452523967ca4463666eeba65ca350d4c"}, - {file = "cryptography-40.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d36bbeb99704aabefdca5aee4eba04455d7a27ceabd16f3b3ba9bdcc31da86c4"}, - {file = "cryptography-40.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:32057d3d0ab7d4453778367ca43e99ddb711770477c4f072a51b3ca69602780a"}, - {file = "cryptography-40.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f5d7b79fa56bc29580faafc2ff736ce05ba31feaa9d4735048b0de7d9ceb2b94"}, - {file = "cryptography-40.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7c872413353c70e0263a9368c4993710070e70ab3e5318d85510cc91cce77e7c"}, - {file = "cryptography-40.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:28d63d75bf7ae4045b10de5413fb1d6338616e79015999ad9cf6fc538f772d41"}, - {file = "cryptography-40.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6f2bbd72f717ce33100e6467572abaedc61f1acb87b8d546001328d7f466b778"}, - {file = "cryptography-40.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cc3a621076d824d75ab1e1e530e66e7e8564e357dd723f2533225d40fe35c60c"}, - {file = "cryptography-40.0.1.tar.gz", hash = "sha256:2803f2f8b1e95f614419926c7e6f55d828afc614ca5ed61543877ae668cc3472"}, + {file = 
"cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"}, + {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"}, + {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"}, + {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"}, + {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"}, + {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"}, + {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"}, + {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"}, + {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"}, + {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"}, ] [package.dependencies] @@ -580,20 +580,20 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.3.23" +version = "2023.5.20" description = "A clean customisable Sphinx documentation theme." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "furo-2023.3.23-py3-none-any.whl", hash = "sha256:1cdf0730496f6ac0ecf394845fe55010539d987a3085f29d819e49a8e87da60a"}, - {file = "furo-2023.3.23.tar.gz", hash = "sha256:6cf9a59fe2919693ecff6509a08229afa081583cbb5b81f59c3e755f3bd81d26"}, + {file = "furo-2023.5.20-py3-none-any.whl", hash = "sha256:594a8436ddfe0c071f3a9e9a209c314a219d8341f3f1af33fdf7c69544fab9e6"}, + {file = "furo-2023.5.20.tar.gz", hash = "sha256:40e09fa17c6f4b22419d122e933089226dcdb59747b5b6c79363089827dea16f"}, ] [package.dependencies] beautifulsoup4 = "*" pygments = ">=2.7" -sphinx = ">=5.0,<7.0" +sphinx = ">=6.0,<8.0" sphinx-basic-ng = "*" [[package]] @@ -629,101 +629,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.2.2" +version = "2.2.3" description = "Python wrapper for hiredis" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:ba6123ff137275e2f4c31fc74b93813fcbb79160d43f5357163e09638c7743de"}, - {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d995846acc8e3339fb7833cd19bf6f3946ff5157c8488a4df9c51cd119a36870"}, - {file = "hiredis-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82f869ca44bcafa37cd71cfa1429648fa354d6021dcd72f03a2f66bcb339c546"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa90a5ee7a7f30c3d72d3513914b8f51f953a71b8cbd52a241b6db6685e55645"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01e2e588392b5fdcc3a6aa0eb62a2eb2a142f829082fa4c3354228029d3aa1ce"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dac177a6ab8b4eb4d5e74978c29eef7cc9eef14086f814cb3893f7465578044"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb992e3f9753c5a0c637f333c2010d1ad702aebf2d730ee4d484f32b19bae97"}, - {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61c22fda5fc25d31bbced24a8322d33c5cb8cad9ba698634c16edb5b3e79a91"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9873898e26e50cd41415e9d1ea128bfdb60eb26abb4f5be28a4500fd7834dc0c"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2c18b00a382546e19bcda8b83dcca5b6e0dbc238d235723434405f48a18e8f77"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8c3a6998f6f88d7ca4d082fd26525074df13162b274d7c64034784b6fdc56666"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0fc1f9a9791d028b2b8afa318ccff734c7fc8861d37a04ca9b3d27c9b05f9718"}, - {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f2cfd323f83985f2bed6ed013107873275025af270485b7d04c338bfb47bd14"}, - {file = "hiredis-2.2.2-cp310-cp310-win32.whl", hash = "sha256:55c7e9a9e05f8c0555bfba5c16d98492f8b6db650e56d0c35cc28aeabfc86020"}, - {file = "hiredis-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:eaff526c2fed31c971b0fa338a25237ae5513550ef75d0b85b9420ec778cca45"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:688b9b7458b4f3f452fea6ed062c04fa1fd9a69d9223d95c6cb052581aba553b"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:544d52fde3a8dac7854673eac20deca05214758193c01926ffbb0d57c6bf4ffe"}, - {file = "hiredis-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:990916e8b0b4eedddef787e73549b562f8c9e73a7fea82f9b8ff517806774ad0"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10dc34854e9acfb3e7cc4157606e2efcb497b1c6fca07bd6c3be34ae5e413f13"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c446a2007985ae49c2ecd946dd819dea72b931beb5f647ba08655a1a1e133fa8"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b9f928dc6cd43ed0f0ffc1c75fb209fb180f004b7e2e19994805f998d247aa"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a355aff8dfa02ebfe67f0946dd706e490bddda9ea260afac9cdc43942310c53"}, - {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831461abe5b63e73719621a5f31d8fc175528a05dc09d5a8aa8ef565d6deefa4"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75349f7c8f77eb0fd33ede4575d1e5b0a902a8176a436bf03293d7fec4bd3894"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1eb39b34d15220095dc49ad1e1082580d35cd3b6d9741def52988b5075e4ff03"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9b306f4e870747eea8b008dcba2e9f1e4acd12b333a684bc1cc120e633a280e"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:03dfb4ab7a2136ce1be305592553f102e1bd91a96068ab2778e3252aed20d9bc"}, - {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8bc89c7e33fecb083a199ade0131a34d20365a8c32239e218da57290987ca9a"}, - {file = "hiredis-2.2.2-cp311-cp311-win32.whl", hash = "sha256:ed44b3c711cecde920f238ac35f70ac08744f2079b6369655856e43944464a72"}, - {file = "hiredis-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:2e2f0ce3e8ab1314a52f562386220f6714fd24d7968a95528135ad04e88cc741"}, - {file = "hiredis-2.2.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e7e61ab75b851aac2d6bc634d03738a242a6ef255a44178437b427c5ebac0a87"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eb14339e399554bb436cc4628e8aaa3943adf7afcf34aba4cbd1e3e6b9ec7ec"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ec57886f20f4298537cb1ab9dbda98594fb8d7c724c5fbf9a4b55329fd4a63"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a89f5afb9827eab07b9c8c585cd4dc95e5232c727508ae2c935d09531abe9e33"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3645590b9234cafd21c8ecfbf252ad9aa1d67629f4bdc98ba3627f48f8f7b5aa"}, - {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99350e89f52186146938bdba0b9c6cd68802c20346707d6ca8366f2d69d89b2f"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b5d290f3d8f7a05c4adbe6c355055b87c7081bfa1eccd1ae5491216307ee5f53"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c95be6f20377d5995ef41a98314542e194d2dc9c2579d8f130a1aea78d48fd42"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = 
"sha256:e4e2da61a04251121cb551f569c3250e6e27e95f2a80f8351c36822eda1f5d2b"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ac7f8d68826f95a3652e44b0c12bfa74d3aa6531d47d5dbe6a2fbfc7979bc20f"}, - {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:359e662324318baadb768d3c4ade8c4bdcfbb313570eb01e15d75dc5db781815"}, - {file = "hiredis-2.2.2-cp37-cp37m-win32.whl", hash = "sha256:fd0ca35e2cf44866137cbb5ae7e439fab18a0b0e0e1cf51d45137622d59ec012"}, - {file = "hiredis-2.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c9488ffb10acc6b121c498875278b0a6715d193742dc92d21a281712169ac06d"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:1570fe4f93bc1ea487fb566f2b863fd0ed146f643a4ea31e4e07036db9e0c7f8"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8753c561b37cccbda7264c9b4486e206a6318c18377cd647beb3aa41a15a6beb"}, - {file = "hiredis-2.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a06d0dd84f10be6b15a92edbca2490b64917280f66d8267c63de99b6550308ad"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40ff3f1ec3a4046732e9e41df08dcb1a559847196755d295d43e32528aae39e6"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24d856e13c02bd9d28a189e47be70cbba6f2c2a4bd85a8cc98819db9e7e3e06"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ee9fe7cef505e8d925c70bebcc16bfab12aa7af922f948346baffd4730f7b00"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03ab1d545794bb0e09f3b1e2c8b3adcfacd84f6f2d402bfdcd441a98c0e9643c"}, - {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14dfccf4696d75395c587a5dafafb4f7aa0a5d55309341d10bc2e7f1eaa20771"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2ddc573809ca4374da1b24b48604f34f3d5f0911fcccfb1c403ff8d8ca31c232"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:24301ca2bf9b2f843b4c3015c90f161798fa3bbc5b95fd494785751b137dbbe2"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b083a69e158138ffa95740ff6984d328259387b5596908021b3ccb946469ff66"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8e16dc949cc2e9c5fbcd08de05b5fb61b89ff65738d772863c5c96248628830e"}, - {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:674f296c3c89cb53f97aa9ba2508d3f360ad481b9e0c0e3a59b342a15192adaf"}, - {file = "hiredis-2.2.2-cp38-cp38-win32.whl", hash = "sha256:20ecbf87aac4f0f33f9c55ae15cb73b485d256c57518c590b7d0c9c152150632"}, - {file = "hiredis-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:b11960237a3025bf248135e5b497dc4923e83d137eb798fbfe78b40d57c4b156"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:18103090b8eda9c529830e26594e88b0b1472055785f3ed29b8adc694d03862a"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d1acb7c957e5343303b3862947df3232dc7395da320b3b9ae076dfaa56ad59dc"}, - {file = "hiredis-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4997f55e1208af95a8fbd0fa187b04c672fcec8f66e49b9ab7fcc45cc1657dc4"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e18506d22af40977abd0f5a8979f57f88d4562fe591478a3438d76a15133d"}, - {file = 
"hiredis-2.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a32a4474f7a4abdea954f3365608edee3f90f1de9fa05b81d214d4cad04c718a"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e86c800c6941698777fc58419216a66a7f76504f1cea72381d2ee206888e964d"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73aa295c5369135247ff63aa1fbb116067485d0506cd787cc0c868e72bbee55"}, - {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e10a66680023bd5c5a3d605dae0844e3dde60eac5b79e39f51395a2aceaf634"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03ab760fc96e0c5d36226eb727f30645bf6a53c97f14bfc0a4d0401bfc9b8af7"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:855d258e7f1aee3d7fbd5b1dc87790b1b5016e23d369a97b934a25ae7bc0171f"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ccc33d87866d213f84f857a98f69c13f94fbf99a3304e328869890c9e49c8d65"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:339af17bb9817f8acb127247c79a99cad63db6738c0fb2aec9fa3d4f35d2a250"}, - {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57f73aa04d0b70ff436fb35fa7ea2b796aa7addbd7ebb8d1aa1f3d1b3e4439f1"}, - {file = "hiredis-2.2.2-cp39-cp39-win32.whl", hash = "sha256:e97d4e650b8d933a1229f341db92b610fc52b8d752490235977b63b81fbbc2cb"}, - {file = "hiredis-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8d43a7bba66a800279e33229a206861be09c279e261eaa8db4824e59465f4848"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632d79fd02b03e8d9fbaebbe40bfe34b920c5d0a9c0ef6270752e0db85208175"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a5fefac31c84143782ec1ebc323c04e733a6e4bfebcef9907a34e47a465e648"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5155bc1710df8e21aa48c9b2f4d4e13e4987e1efff363a1ef9c84fae2cc6c145"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f220b71235d2deab1b4b22681c8aee444720d973b80f1b86a4e2a85f6bcf1e1"}, - {file = "hiredis-2.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1f1efbe9cc29a3af39cf7eed27225f951aed3f48a1149c7fb74529fb5ab86d4"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1f1c44242c18b1f02e6d1162f133d65d00e09cc10d9165dccc78662def72abc2"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0f444d9062f7e487ef42bab2fb2e290f1704afcbca48ad3ec23de63eef0fda"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac15e7e1efca51b4695e540c80c328accb352c9608da7c2df82d1fa1a3c539ef"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20cfbc469400669a5999aa34ccba3872a1e34490ec3d5c84e8c0752c27977b7c"}, - {file = "hiredis-2.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bae004a0b978bf62e38d0eef5ab9156f8101d01167b3ca7054bd0994b773e917"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1ce725542133dbdda9e8704867ef52651886bd1ef568c6fd997a27404381985"}, - {file = 
"hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6ea7532221c97fa6d79f7d19d452cd9d1141d759c54279cc4774ce24728f13"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7114961ed78d708142f6c6eb1d2ed65dc3da4b5ae8a4660ad889dd7fc891971"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b084fbc3e69f99865242f8e1ccd4ea2a34bf6a3983d015d61133377526c0ce2"}, - {file = "hiredis-2.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2d1ba0799f3487294f72b2157944d5c3a4fb33c99e2d495d63eab98c7ec7234b"}, - {file = "hiredis-2.2.2.tar.gz", hash = "sha256:9c270bd0567a9c60673284e000132f603bb4ecbcd707567647a68f85ef45c4d4"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:9a1a80a8fa767f2fdc3870316a54b84fe9fc09fa6ab6a2686783de6a228a4604"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3f006c28c885deb99b670a5a66f367a175ab8955b0374029bad7111f5357dcd4"}, + {file = "hiredis-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffaf841546905d90ff189de7397aa56413b1ce5e54547f17a98f0ebf3a3b0a3b"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cadb0ac7ba3babfd804e425946bec9717b320564a1390f163a54af9365a720a"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33bc4721632ef9708fa44e5df0066053fccc8e65410a2c48573192517a533b48"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:227c5b4bcb60f89008c275d596e4a7b6625a6b3c827b8a66ae582eace7051f71"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61995eb826009d99ed8590747bc0da683a5f4fbb4faa8788166bf3810845cd5c"}, + {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f969edc851efe23010e0f53a64269f2629a9364135e9ec81c842e8b2277d0c1"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27e560eefb57914d742a837f1da98d3b29cb22eff013c8023b7cf52ae6e051d"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3759f4789ae1913b7df278dfc9e8749205b7a106f888cd2903d19461e24a7697"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c6cb613148422c523945cdb8b6bed617856f2602fd8750e33773ede2616e55d5"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:1d274d5c511dfc03f83f997d3238eaa9b6ee3f982640979f509373cced891e98"}, + {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b7fe075e91b9d9cff40eba4fb6a8eff74964d3979a39be9a9ef58b1b4cb3604"}, + {file = "hiredis-2.2.3-cp310-cp310-win32.whl", hash = "sha256:77924b0d32fd1f493d3df15d9609ddf9d94c31a364022a6bf6b525ce9da75bea"}, + {file = "hiredis-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:dcb0569dd5bfe6004658cd0f229efa699a3169dcb4f77bd72e188adda302063d"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:d115790f18daa99b5c11a506e48923b630ef712e9e4b40482af942c3d40638b8"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c3b8be557e08b234774925622e196f0ee36fe4eab66cd19df934d3efd8f3743"}, + {file = "hiredis-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:3f5446068197b35a11ccc697720c41879c8657e2e761aaa8311783aac84cef20"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa17a3b22b3726d54d7af20394f65d4a1735a842a4e0f557dc67a90f6965c4bc"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7df645b6b7800e8b748c217fbd6a4ca8361bcb9a1ae6206cc02377833ec8a1aa"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fb9300959a0048138791f3d68359d61a788574ec9556bddf1fec07f2dbc5320"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d7e459fe7313925f395148d36d9b7f4f8dac65be06e45d7af356b187cef65fc"}, + {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8eceffca3941775b646cd585cd19b275d382de43cc3327d22f7c75d7b003d481"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b17baf702c6e5b4bb66e1281a3efbb1d749c9d06cdb92b665ad81e03118f78fc"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e43e2b5acaad09cf48c032f7e4926392bb3a3f01854416cf6d82ebff94d5467"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a7205497d7276a81fe92951a29616ef96562ed2f91a02066f72b6f93cb34b40e"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:126623b03c31cb6ac3e0d138feb6fcc36dd43dd34fc7da7b7a0c38b5d75bc896"}, + {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:071c5814b850574036506a8118034f97c3cbf2fe9947ff45a27b07a48da56240"}, + {file = "hiredis-2.2.3-cp311-cp311-win32.whl", hash = "sha256:d1be9e30e675f5bc1cb534633324578f6f0944a1bcffe53242cf632f554f83b6"}, + {file = "hiredis-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9a7c987e161e3c58f992c63b7e26fea7fe0777f3b975799d23d65bbb8cb5899"}, + {file = "hiredis-2.2.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f2dcb8389fa3d453927b1299f46bdb38473c293c8269d5c777d33ea0e526b610"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2df98f5e071320c7d84e8bd07c0542acdd0a7519307fc31774d60e4b842ec4f"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a72e4a523cdfc521762137559c08dfa360a3caef63620be58c699d1717dac1"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9b9e5bde7030cae83aa900b5bd660decc65afd2db8c400f3c568c815a47ca2a"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2614f17e261f72efc2f19f5e5ff2ee19e2296570c0dcf33409e22be30710de"}, + {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46525fbd84523cac75af5bf524bc74aaac848beaf31b142d2df8a787d9b4bbc4"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d1a4ce40ba11da9382c14da31f4f9e88c18f7d294f523decd0fadfb81f51ad18"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cda592405bbd29d53942e0389dc3fa77b49c362640210d7e94a10c14a677d4d"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5e6674a017629284ef373b50496d9fb1a89b85a20a7fa100ecd109484ec748e5"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:e62ec131816c6120eff40dffe43424e140264a15fa4ab88c301bd6a595913af3"}, + {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17e938d9d3ee92e1adbff361706f1c36cc60eeb3e3eeca7a3a353eae344f4c91"}, + {file = "hiredis-2.2.3-cp37-cp37m-win32.whl", hash = "sha256:95d2305fd2a7b179cacb48b10f618872fc565c175f9f62b854e8d1acac3e8a9e"}, + {file = "hiredis-2.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8f9dbe12f011a9b784f58faecc171d22465bb532c310bd588d769ba79a59ef5a"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:5a4bcef114fc071d5f52c386c47f35aae0a5b43673197b9288a15b584da8fa3a"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:232d0a70519865741ba56e1dfefd160a580ae78c30a1517bad47b3cf95a3bc7d"}, + {file = "hiredis-2.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9076ce8429785c85f824650735791738de7143f61f43ae9ed83e163c0ca0fa44"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec58fb7c2062f835595c12f0f02dcda76d0eb0831423cc191d1e18c9276648de"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f2b34a6444b8f9c1e9f84bd2c639388e5d14f128afd14a869dfb3d9af893aa2"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:818dfd310aa1020a13cd08ee48e116dd8c3bb2e23b8161f8ac4df587dd5093d7"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d9ea6c8d4cbdeee2e0d43379ce2881e4af0454b00570677c59f33f2531cd38"}, + {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1eadbcd3de55ac42310ff82550d3302cb4efcd4e17d76646a17b6e7004bb42b"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:477c34c4489666dc73cb5e89dafe2617c3e13da1298917f73d55aac4696bd793"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:14824e457e4f5cda685c3345d125da13949bcf3bb1c88eb5d248c8d2c3dee08f"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9cd32326dfa6ce87edf754153b0105aca64486bebe93b9600ccff74fa0b224df"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:51341e70b467004dcbec3a6ce8c478d2d6241e0f6b01e4c56764afd5022e1e9d"}, + {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2443659c76b226267e2a04dbbb21bc2a3f91aa53bdc0c22964632753ae43a247"}, + {file = "hiredis-2.2.3-cp38-cp38-win32.whl", hash = "sha256:4e3e3e31423f888d396b1fc1f936936e52af868ac1ec17dd15e3eeba9dd4de24"}, + {file = "hiredis-2.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:20f509e3a1a20d6e5f5794fc37ceb21f70f409101fcfe7a8bde783894d51b369"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:d20891e3f33803b26d54c77fd5745878497091e33f4bbbdd454cf6e71aee8890"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:50171f985e17970f87d5a29e16603d1e5b03bdbf5c2691a37e6c912942a6b657"}, + {file = "hiredis-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9944a2cac25ffe049a7e89f306e11b900640837d1ef38d9be0eaa4a4e2b73a52"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a5c8019ff94988d56eb49b15de76fe83f6b42536d76edeb6565dbf7fe14b973"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a286ded34eb16501002e3713b3130c987366eee2ba0d58c33c72f27778e31676"}, + 
{file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e974ad15eb32b1f537730dea70b93a4c3db7b026de3ad2b59da49c6f7454d"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08415ea74c1c29b9d6a4ca3dd0e810dc1af343c1d1d442e15ba133b11ab5be6a"}, + {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e17d04ea58ab8cf3f2dc52e875db16077c6357846006780086fff3189fb199d"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6ccdcb635dae85b006592f78e32d97f4bc7541cb27829d505f9c7fefcef48298"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69536b821dd1bc78058a6e7541743f8d82bf2d981b91280b14c4daa6cdc7faba"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:3753df5f873d473f055e1f8837bfad0bd3b277c86f3c9bf058c58f14204cd901"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6f88cafe46612b6fa68e6dea49e25bebf160598bba00101caa51cc8c1f18d597"}, + {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33ee3ea5cad3a8cb339352cd230b411eb437a2e75d7736c4899acab32056ccdb"}, + {file = "hiredis-2.2.3-cp39-cp39-win32.whl", hash = "sha256:b4f3d06dc16671b88a13ae85d8ca92534c0b637d59e49f0558d040a691246422"}, + {file = "hiredis-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4f674e309cd055ee7a48304ceb8cf43265d859faf4d7d01d270ce45e976ae9d3"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8f280ab4e043b089777b43b4227bdc2035f88da5072ab36588e0ccf77d45d058"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c2a551f3b8a26f7940d6ee10b837810201754b8d7e6f6b1391655370882c5a"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c4e3c258eafaab21b174b17270a0cc093718d61cdbde8c03f85ec4bf835343"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc36a9dded458d4e37492fe3e619c6c83caae794d26ad925adbce61d592f8428"}, + {file = "hiredis-2.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:4ed68a3b1ccb4313d2a42546fd7e7439ad4745918a48b6c9bcaa61e1e3e42634"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3bf4b5bae472630c229518e4a814b1b68f10a3d9b00aeaec45f1a330f03a0251"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33a94d264e6e12a79d9bb8af333b01dc286b9f39c99072ab5fef94ce1f018e17"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fa6811a618653164f918b891a0fa07052bd71a799defa5c44d167cac5557b26"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af33f370be90b48bbaf0dab32decbdcc522b1fa95d109020a963282086518a8e"}, + {file = "hiredis-2.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b9953d87418ac228f508d93898ab572775e4d3b0eeb886a1a7734553bcdaf291"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5e7bb4dd524f50b71c20ef5a12bd61da9b463f8894b18a06130942fe31509881"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89a258424158eb8b3ed9f65548d68998da334ef155d09488c5637723eb1cd697"}, + {file = 
"hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f4a65276f6ecdebe75f2a53f578fbc40e8d2860658420d5e0611c56bbf5054c"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:334f2738700b20faa04a0d813366fb16ed17287430a6b50584161d5ad31ca6d7"}, + {file = "hiredis-2.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d194decd9608f11c777946f596f31d5aacad13972a0a87829ae1e6f2d26c1885"}, + {file = "hiredis-2.2.3.tar.gz", hash = "sha256:e75163773a309e56a9b58165cf5a50e0f84b755f6ff863b2c01a38918fe92daa"}, ] [[package]] @@ -855,14 +855,14 @@ files = [ [[package]] name = "immutabledict" -version = "2.2.3" +version = "2.2.4" description = "Immutable wrapper around dictionaries (a fork of frozendict)" category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ - {file = "immutabledict-2.2.3-py3-none-any.whl", hash = "sha256:a7b078ebcc4a58ddc73b55f808b26e7c8c2d5183fad325615112689e1a63e714"}, - {file = "immutabledict-2.2.3.tar.gz", hash = "sha256:0e1e8a3f2b3ff062daa19795f947e9ec7a58add269d44e34d3ab4319e1343853"}, + {file = "immutabledict-2.2.4-py3-none-any.whl", hash = "sha256:c827715c147d2364522f9a7709cc424c7001015274a3c705250e673605bde64b"}, + {file = "immutabledict-2.2.4.tar.gz", hash = "sha256:3bedc0741faaa2846f6edf5c29183f993da3abaff6a5961bb70a5659bb9e68ab"}, ] [[package]] @@ -1460,38 +1460,38 @@ files = [ [[package]] name = "mypy" -version = "1.0.0" +version = "1.0.1" description = "Optional static typing for Python" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0626db16705ab9f7fa6c249c017c887baf20738ce7f9129da162bb3075fc1af"}, - {file = "mypy-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ace23f6bb4aec4604b86c4843276e8fa548d667dbbd0cb83a3ae14b18b2db6c"}, - {file = "mypy-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87edfaf344c9401942883fad030909116aa77b0fa7e6e8e1c5407e14549afe9a"}, - {file = "mypy-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0ab090d9240d6b4e99e1fa998c2d0aa5b29fc0fb06bd30e7ad6183c95fa07593"}, - {file = "mypy-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:7cc2c01dfc5a3cbddfa6c13f530ef3b95292f926329929001d45e124342cd6b7"}, - {file = "mypy-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14d776869a3e6c89c17eb943100f7868f677703c8a4e00b3803918f86aafbc52"}, - {file = "mypy-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb2782a036d9eb6b5a6efcdda0986774bf798beef86a62da86cb73e2a10b423d"}, - {file = "mypy-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cfca124f0ac6707747544c127880893ad72a656e136adc935c8600740b21ff5"}, - {file = "mypy-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8845125d0b7c57838a10fd8925b0f5f709d0e08568ce587cc862aacce453e3dd"}, - {file = "mypy-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b1b9e1ed40544ef486fa8ac022232ccc57109f379611633ede8e71630d07d2"}, - {file = "mypy-1.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c7cf862aef988b5fbaa17764ad1d21b4831436701c7d2b653156a9497d92c83c"}, - {file = "mypy-1.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd187d92b6939617f1168a4fe68f68add749902c010e66fe574c165c742ed88"}, - {file = "mypy-1.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4e5175026618c178dfba6188228b845b64131034ab3ba52acaffa8f6c361f805"}, - {file = 
"mypy-1.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2f6ac8c87e046dc18c7d1d7f6653a66787a4555085b056fe2d599f1f1a2a2d21"}, - {file = "mypy-1.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7306edca1c6f1b5fa0bc9aa645e6ac8393014fa82d0fa180d0ebc990ebe15964"}, - {file = "mypy-1.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3cfad08f16a9c6611e6143485a93de0e1e13f48cfb90bcad7d5fde1c0cec3d36"}, - {file = "mypy-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67cced7f15654710386e5c10b96608f1ee3d5c94ca1da5a2aad5889793a824c1"}, - {file = "mypy-1.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a86b794e8a56ada65c573183756eac8ac5b8d3d59daf9d5ebd72ecdbb7867a43"}, - {file = "mypy-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:50979d5efff8d4135d9db293c6cb2c42260e70fb010cbc697b1311a4d7a39ddb"}, - {file = "mypy-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ae4c7a99e5153496243146a3baf33b9beff714464ca386b5f62daad601d87af"}, - {file = "mypy-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e398652d005a198a7f3c132426b33c6b85d98aa7dc852137a2a3be8890c4072"}, - {file = "mypy-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be78077064d016bc1b639c2cbcc5be945b47b4261a4f4b7d8923f6c69c5c9457"}, - {file = "mypy-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92024447a339400ea00ac228369cd242e988dd775640755fa4ac0c126e49bb74"}, - {file = "mypy-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fe523fcbd52c05040c7bee370d66fee8373c5972171e4fbc323153433198592d"}, - {file = "mypy-1.0.0-py3-none-any.whl", hash = "sha256:2efa963bdddb27cb4a0d42545cd137a8d2b883bd181bbc4525b568ef6eca258f"}, - {file = "mypy-1.0.0.tar.gz", hash = "sha256:f34495079c8d9da05b183f9f7daec2878280c2ad7cc81da686ef0b484cea2ecf"}, + {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, + {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, + {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"}, + {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"}, + {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"}, + {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"}, + {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"}, + {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"}, + {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"}, + {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"}, + 
{file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"}, + {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"}, + {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"}, + {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"}, + {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"}, + {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"}, + {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"}, + {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"}, + {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"}, + {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"}, ] [package.dependencies] @@ -1593,26 +1593,26 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "23.0" +version = "23.1" description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, - {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, ] [[package]] name = "parameterized" -version = "0.8.1" +version = "0.9.0" description = "Parameterized testing with any Python test framework" category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"}, - {file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"}, + {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, + {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, ] [package.extras] @@ -1632,14 +1632,14 
@@ files = [ [[package]] name = "phonenumbers" -version = "8.13.7" +version = "8.13.11" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." category = "main" optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.7-py2.py3-none-any.whl", hash = "sha256:d3e3555b38c89b121f5b2e917847003bdd07027569d758d5f40156c01aeac089"}, - {file = "phonenumbers-8.13.7.tar.gz", hash = "sha256:253bb0e01250d21a11f2b42b3e6e161b7f6cb2ac440e2e2a95c1da71d221ee1a"}, + {file = "phonenumbers-8.13.11-py2.py3-none-any.whl", hash = "sha256:107469114fd297258a485bdf8238d0522cb392db1257faf2bf23384ecbdb0e8a"}, + {file = "phonenumbers-8.13.11.tar.gz", hash = "sha256:3e3274d88cab3609b55ff5b93417075dbca2d13064f103fbf562e0ea1dda0f9a"}, ] [[package]] @@ -1796,25 +1796,25 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.5" +version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = true python-versions = ">=3.6" files = [ - {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, - {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, - {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"}, - {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"}, - {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"}, - {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"}, - {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"}, - {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"}, - {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"}, + {file = "psycopg2-2.9.6-cp310-cp310-win32.whl", hash = "sha256:f7a7a5ee78ba7dc74265ba69e010ae89dae635eea0e97b055fb641a01a31d2b1"}, + {file = "psycopg2-2.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:f75001a1cbbe523e00b0ef896a5a1ada2da93ccd752b7636db5a99bc57c44494"}, + {file = "psycopg2-2.9.6-cp311-cp311-win32.whl", hash = "sha256:53f4ad0a3988f983e9b49a5d9765d663bbe84f508ed655affdb810af9d0972ad"}, + {file = "psycopg2-2.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:b81fcb9ecfc584f661b71c889edeae70bae30d3ef74fa0ca388ecda50b1222b7"}, + {file = "psycopg2-2.9.6-cp36-cp36m-win32.whl", hash = "sha256:11aca705ec888e4f4cea97289a0bf0f22a067a32614f6ef64fcf7b8bfbc53744"}, + {file = "psycopg2-2.9.6-cp36-cp36m-win_amd64.whl", hash = 
"sha256:36c941a767341d11549c0fbdbb2bf5be2eda4caf87f65dfcd7d146828bd27f39"}, + {file = "psycopg2-2.9.6-cp37-cp37m-win32.whl", hash = "sha256:869776630c04f335d4124f120b7fb377fe44b0a7645ab3c34b4ba42516951889"}, + {file = "psycopg2-2.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a8ad4a47f42aa6aec8d061fdae21eaed8d864d4bb0f0cade5ad32ca16fcd6258"}, + {file = "psycopg2-2.9.6-cp38-cp38-win32.whl", hash = "sha256:2362ee4d07ac85ff0ad93e22c693d0f37ff63e28f0615a16b6635a645f4b9214"}, + {file = "psycopg2-2.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:d24ead3716a7d093b90b27b3d73459fe8cd90fd7065cf43b3c40966221d8c394"}, + {file = "psycopg2-2.9.6-cp39-cp39-win32.whl", hash = "sha256:1861a53a6a0fd248e42ea37c957d36950da00266378746588eab4f4b5649e95f"}, + {file = "psycopg2-2.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:ded2faa2e6dfb430af7713d87ab4abbfc764d8d7fb73eafe96a24155f906ebf5"}, + {file = "psycopg2-2.9.6.tar.gz", hash = "sha256:f15158418fd826831b28585e2ab48ed8df2d0d98f502a2b4fe619e7d5ca29011"}, ] [[package]] @@ -1860,18 +1860,18 @@ files = [ [[package]] name = "pyasn1-modules" -version = "0.2.8" -description = "A collection of ASN.1-based protocols modules." +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" category = "main" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, - {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, ] [package.dependencies] -pyasn1 = ">=0.4.6,<0.5.0" +pyasn1 = ">=0.4.6,<0.6.0" [[package]] name = "pycparser" @@ -1940,14 +1940,14 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.58.1" +version = "1.58.2" description = "Use the full Github API v3" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.58.1-py3-none-any.whl", hash = "sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"}, - {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"}, + {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"}, + {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"}, ] [package.dependencies] @@ -1973,13 +1973,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pyicu" -version = "2.10.2" +version = "2.11" description = "Python extension wrapping the ICU C++ API" category = "main" optional = true python-versions = "*" files = [ - {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"}, + {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"}, ] [[package]] @@ -2060,14 +2060,14 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "23.1.0" +version = "23.1.1" description = "Python wrapper module around the OpenSSL library" category = "main" optional = false python-versions = 
">=3.6" files = [ - {file = "pyOpenSSL-23.1.0-py3-none-any.whl", hash = "sha256:fb96e936866ad65662c22d0de84ca0fba58397893cdfe0f01334fa93382af23c"}, - {file = "pyOpenSSL-23.1.0.tar.gz", hash = "sha256:8cb78010a1eb2c8e24b851693b7b04dfe9b1dc0a5ab3843927b10a85b1dfbb2e"}, + {file = "pyOpenSSL-23.1.1-py3-none-any.whl", hash = "sha256:9e0c526404a210df9d2b18cd33364beadb0dc858a739b885677bc65e105d4a4c"}, + {file = "pyOpenSSL-23.1.1.tar.gz", hash = "sha256:841498b9bec61623b1b6c47ebbc02367c07d60e0e195f19790817f10cc8db0b7"}, ] [package.dependencies] @@ -2251,21 +2251,21 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] @@ -2323,29 +2323,29 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.0.259" +version = "0.0.265" description = "An extremely fast Python linter, written in Rust." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.259-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:f3938dc45e2a3f818e9cbd53007265c22246fbfded8837b2c563bf0ebde1a226"}, - {file = "ruff-0.0.259-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:22e1e35bf5f12072cd644d22afd9203641ccf258bc14ff91aa1c43dc14f6047d"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2fb20e89e85d147c85caa807707a1488bccc1f3854dc3d53533e89b52a0c5ff"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:49e903bcda19f6bb0725a962c058eb5d61f40d84ef52ed53b61939b69402ab4e"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71f0ef1985e9a6696fa97da8459917fa34bdaa2c16bd33bd5edead585b7d44f7"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7cfef26619cba184d59aa7fa17b48af5891d51fc0b755a9bc533478a10d4d066"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79b02fa17ec1fd8d306ae302cb47fb614b71e1f539997858243769bcbe78c6d9"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:428507fb321b386dda70d66cd1a8aa0abf51d7c197983d83bb9e4fa5ee60300b"}, - {file = "ruff-0.0.259-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5fbaea9167f1852757f02133e5daacdb8c75b3431343205395da5b10499927a"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:40ae87f2638484b7e8a7567b04a7af719f1c484c5bf132038b702bb32e1f6577"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:29e2b77b7d5da6a7dd5cf9b738b511355c5734ece56f78e500d4b5bffd58c1a0"}, - {file = 
"ruff-0.0.259-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b3c1beacf6037e7f0781d4699d9a2dd4ba2462f475be5b1f45cf84c4ba3c69d"}, - {file = "ruff-0.0.259-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:daaea322e7e85f4c13d82be9536309e1c4b8b9851bb0cbc7eeb15d490fd46bf9"}, - {file = "ruff-0.0.259-py3-none-win32.whl", hash = "sha256:38704f151323aa5858370a2f792e122cc25e5d1aabe7d42ceeab83da18f0b456"}, - {file = "ruff-0.0.259-py3-none-win_amd64.whl", hash = "sha256:aa9449b898287e621942cc71b9327eceb8f0c357e4065fecefb707ef2d978df8"}, - {file = "ruff-0.0.259-py3-none-win_arm64.whl", hash = "sha256:e4f39e18702de69faaaee3969934b92d7467285627f99a5b6ecd55a7d9f5d086"}, - {file = "ruff-0.0.259.tar.gz", hash = "sha256:8b56496063ab3bfdf72339a5fbebb8bd46e5c5fee25ef11a9f03b208fa0562ec"}, + {file = "ruff-0.0.265-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:30ddfe22de6ce4eb1260408f4480bbbce998f954dbf470228a21a9b2c45955e4"}, + {file = "ruff-0.0.265-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:a11bd0889e88d3342e7bc514554bb4461bf6cc30ec115821c2425cfaac0b1b6a"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a9b38bdb40a998cbc677db55b6225a6c4fadcf8819eb30695e1b8470942426b"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8b44a245b60512403a6a03a5b5212da274d33862225c5eed3bcf12037eb19bb"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b279fa55ea175ef953208a6d8bfbcdcffac1c39b38cdb8c2bfafe9222add70bb"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5028950f7af9b119d43d91b215d5044976e43b96a0d1458d193ef0dd3c587bf8"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4057eb539a1d88eb84e9f6a36e0a999e0f261ed850ae5d5817e68968e7b89ed9"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d586e69ab5cbf521a1910b733412a5735936f6a610d805b89d35b6647e2a66aa"}, + {file = "ruff-0.0.265-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa17b13cd3f29fc57d06bf34c31f21d043735cc9a681203d634549b0e41047d1"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9ac13b11d9ad3001de9d637974ec5402a67cefdf9fffc3929ab44c2fcbb850a1"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:62a9578b48cfd292c64ea3d28681dc16b1aa7445b7a7709a2884510fc0822118"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0f9967f84da42d28e3d9d9354cc1575f96ed69e6e40a7d4b780a7a0418d9409"}, + {file = "ruff-0.0.265-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d5a8de2fbaf91ea5699451a06f4074e7a312accfa774ad9327cde3e4fda2081"}, + {file = "ruff-0.0.265-py3-none-win32.whl", hash = "sha256:9e9db5ccb810742d621f93272e3cc23b5f277d8d00c4a79668835d26ccbe48dd"}, + {file = "ruff-0.0.265-py3-none-win_amd64.whl", hash = "sha256:f54facf286103006171a00ce20388d88ed1d6732db3b49c11feb9bf3d46f90e9"}, + {file = "ruff-0.0.265-py3-none-win_arm64.whl", hash = "sha256:c78470656e33d32ddc54e8482b1b0fc6de58f1195586731e5ff1405d74421499"}, + {file = "ruff-0.0.265.tar.gz", hash = "sha256:53c17f0dab19ddc22b254b087d1381b601b155acfa8feed514f0d6a413d0ab3a"}, ] [[package]] @@ -2382,19 +2382,19 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.17.0" +version = "1.22.1" description = "Python client for Sentry (https://sentry.io)" category = 
"main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.17.0.tar.gz", hash = "sha256:ad40860325c94d1a656da70fba5a7c4dbb2f6809d3cc2d00f74ca0b608330f14"}, - {file = "sentry_sdk-1.17.0-py2.py3-none-any.whl", hash = "sha256:3c4e898f7a3edf5a2042cd0dcab6ee124e2112189228c272c08ad15d3850c201"}, + {file = "sentry-sdk-1.22.1.tar.gz", hash = "sha256:052dff5069c6f0d836ee014323576824a9b40836fc003fb12489a1f19c60a3c9"}, + {file = "sentry_sdk-1.22.1-py2.py3-none-any.whl", hash = "sha256:c6c6946f8c927adb00af1c5ab6921df38775b2199b9003816d5935a1310352d5"}, ] [package.dependencies] certifi = "*" -urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} +urllib3 = {version = ">=1.26.11,<2.0.0", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] @@ -2407,6 +2407,7 @@ django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)"] +grpcio = ["grpcio (>=1.21.1)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] @@ -2465,14 +2466,14 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.5.2" +version = "1.6.0" description = "Setuptools Rust extension plugin" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"}, - {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"}, + {file = "setuptools-rust-1.6.0.tar.gz", hash = "sha256:c86e734deac330597998bfbc08da45187e6b27837e23bd91eadb320732392262"}, + {file = "setuptools_rust-1.6.0-py3-none-any.whl", hash = "sha256:e28ae09fb7167c44ab34434eb49279307d611547cb56cb9789955cdb54a1aed9"}, ] [package.dependencies] @@ -2564,21 +2565,21 @@ files = [ [[package]] name = "sphinx" -version = "6.1.3" +version = "6.2.1" description = "Python documentation generator" category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "Sphinx-6.1.3.tar.gz", hash = "sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2"}, - {file = "sphinx-6.1.3-py3-none-any.whl", hash = "sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc"}, + {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"}, + {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"}, ] [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=2.9" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18,<0.20" +docutils = ">=0.18.1,<0.20" imagesize = ">=1.3" importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} Jinja2 = ">=3.0" @@ -2596,7 +2597,7 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] -test = ["cython", "html5lib", "pytest (>=4.6)"] +test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] [[package]] name = "sphinx-autodoc2" @@ -3009,86 +3010,86 @@ files = [ [[package]] name = "types-commonmark" -version = "0.9.2.2" +version = "0.9.2.3" description = "Typing stubs for commonmark" category = "dev" optional = false 
python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.2.tar.gz", hash = "sha256:f3259350634c2ce68ae503398430482f7cf44e5cae3d344995e916fbf453b4be"}, - {file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"}, + {file = "types-commonmark-0.9.2.3.tar.gz", hash = "sha256:42769a2c194fd5b49fd9eedfd4a83cd1d2514c6d0a36f00f5c5ffe0b6a2d2fcf"}, + {file = "types_commonmark-0.9.2.3-py3-none-any.whl", hash = "sha256:b575156e1b8a292d43acb36f861110b85c4bc7aa53bbfb5ac64addec15d18cfa"}, ] [[package]] name = "types-jsonschema" -version = "4.17.0.6" +version = "4.17.0.7" description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.6.tar.gz", hash = "sha256:e9b15e34b4f2fd5587bd68530fa0eb2a17c73ead212f4471d71eea032d231c46"}, - {file = "types_jsonschema-4.17.0.6-py3-none-any.whl", hash = "sha256:ecef99bc64848f3798ad18922dfb2b40da25f17796fafcee50da984a21c5d6e6"}, + {file = "types-jsonschema-4.17.0.7.tar.gz", hash = "sha256:130e57c5f1ca755f95775d0822ad7a3907294e1461306af54baf804f317fd54c"}, + {file = "types_jsonschema-4.17.0.7-py3-none-any.whl", hash = "sha256:e129b52be6df841d97a98f087631dd558f7812eb91ff7b733c3301bd2446271b"}, ] [[package]] name = "types-netaddr" -version = "0.8.0.6" +version = "0.8.0.8" description = "Typing stubs for netaddr" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-netaddr-0.8.0.6.tar.gz", hash = "sha256:e5048640c2412e7ea2d3eb02c94ae1b50442b2c7a50a7c48e957676139cdf19b"}, - {file = "types_netaddr-0.8.0.6-py3-none-any.whl", hash = "sha256:d4d40d1ba35430a4e4c929596542cd37e6831f5d08676b33dc84e06e01a840f6"}, + {file = "types-netaddr-0.8.0.8.tar.gz", hash = "sha256:db7e8cd16b1244e7c4541edd0df99d1039fc05fd5387c21840f0b958fc52aabc"}, + {file = "types_netaddr-0.8.0.8-py3-none-any.whl", hash = "sha256:6741b3824e2ec3f7a74842b394439b71107c7675f8ae42bb2b5e7a8ebfe8cf18"}, ] [[package]] name = "types-opentracing" -version = "2.4.10.3" +version = "2.4.10.4" description = "Typing stubs for opentracing" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-opentracing-2.4.10.3.tar.gz", hash = "sha256:b277f114265b41216714f9c77dffcab57038f1730fd141e2c55c5c9f6f2caa87"}, - {file = "types_opentracing-2.4.10.3-py3-none-any.whl", hash = "sha256:60244d718fcd9de7043645ecaf597222d550432507098ab2e6268f7b589a7fa7"}, + {file = "types-opentracing-2.4.10.4.tar.gz", hash = "sha256:347040c9da4ada7d3c795659912c95d98c5651e242e8eaa0344815fee5bb97e2"}, + {file = "types_opentracing-2.4.10.4-py3-none-any.whl", hash = "sha256:73c9b958eea3df6c4906ebf3865608a562dd9981c1bbc75a373a583c613bed56"}, ] [[package]] name = "types-pillow" -version = "9.4.0.17" +version = "9.5.0.4" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.17.tar.gz", hash = "sha256:7f0e871d2d46fbb6bc7deca3e02dc552cf9c1e8b49deb9595509551be3954e49"}, - {file = "types_Pillow-9.4.0.17-py3-none-any.whl", hash = "sha256:f8b848a05f17cb4d53d245c59bf560372b9778d4cfaf9705f6245009bf9f65f3"}, + {file = "types-Pillow-9.5.0.4.tar.gz", hash = "sha256:f1b6af47abd151847ee25911ffeba784899bc7dc7f9eba8ca6a5aac522b012ef"}, + {file = "types_Pillow-9.5.0.4-py3-none-any.whl", hash = "sha256:69427d9fa4320ff6e30f00fb9c0dd71185dc0a16de4757774220104759483466"}, ] [[package]] name = "types-psycopg2" -version = "2.9.21.8" +version = "2.9.21.10" description = "Typing stubs for 
psycopg2" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.8.tar.gz", hash = "sha256:b629440ffcfdebd742fab07f777ff69aefdd19394a138c18e921a1964c3cf5f6"}, - {file = "types_psycopg2-2.9.21.8-py3-none-any.whl", hash = "sha256:e747fbec6e0e2502b625bc7686d13cc62fc170e8ae920e5ba27fac946778eeb9"}, + {file = "types-psycopg2-2.9.21.10.tar.gz", hash = "sha256:c2600892312ae1c34e12f145749795d93dc4eac3ef7dbf8a9c1bfd45385e80d7"}, + {file = "types_psycopg2-2.9.21.10-py3-none-any.whl", hash = "sha256:918224a0731a3650832e46633e720703b5beef7693a064e777d9748654fcf5e5"}, ] [[package]] name = "types-pyopenssl" -version = "23.1.0.0" +version = "23.1.0.2" description = "Typing stubs for pyOpenSSL" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-23.1.0.0.tar.gz", hash = "sha256:acc153718bff497e8f6ca3beecb5ea7a3087c796e40d569fded8bafbfca73605"}, - {file = "types_pyOpenSSL-23.1.0.0-py3-none-any.whl", hash = "sha256:9dacec020a3484ef5e4ea4bd9d403a981765b80821d5a40b790b2ba2f09d58db"}, + {file = "types-pyOpenSSL-23.1.0.2.tar.gz", hash = "sha256:20b80971b86240e8432a1832bd8124cea49c3088c7bfc77dfd23be27ffe4a517"}, + {file = "types_pyOpenSSL-23.1.0.2-py3-none-any.whl", hash = "sha256:b050641aeff6dfebf231ad719bdac12d53b8ee818d4afb67b886333484629957"}, ] [package.dependencies] @@ -3096,41 +3097,41 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.8" +version = "6.0.12.9" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.8.tar.gz", hash = "sha256:19304869a89d49af00be681e7b267414df213f4eb89634c4495fa62e8f942b9f"}, - {file = "types_PyYAML-6.0.12.8-py3-none-any.whl", hash = "sha256:5314a4b2580999b2ea06b2e5f9a7763d860d6e09cdf21c0e9561daa9cbd60178"}, + {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, + {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, ] [[package]] name = "types-requests" -version = "2.28.11.16" +version = "2.30.0.0" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.28.11.16.tar.gz", hash = "sha256:9d4002056df7ebc4ec1f28fd701fba82c5c22549c4477116cb2656aa30ace6db"}, - {file = "types_requests-2.28.11.16-py3-none-any.whl", hash = "sha256:a86921028335fdcc3aaf676c9d3463f867db6af2303fc65aa309b13ae1e6dd53"}, + {file = "types-requests-2.30.0.0.tar.gz", hash = "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"}, + {file = "types_requests-2.30.0.0-py3-none-any.whl", hash = "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864"}, ] [package.dependencies] -types-urllib3 = "<1.27" +types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.6.0.5" +version = "67.8.0.0" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.6.0.5.tar.gz", hash = "sha256:3a708e66c7bdc620e4d0439f344c750c57a4340c895a4c3ed2d0fc4ae8eb9962"}, - {file = "types_setuptools-67.6.0.5-py3-none-any.whl", hash = "sha256:dae5a4a659dbb6dba57773440f6e2dbdd8ef282dc136a174a8a59bd33d949945"}, + {file = "types-setuptools-67.8.0.0.tar.gz", hash = "sha256:95c9ed61871d6c0e258433373a4e1753c0a7c3627a46f4d4058c7b5a08ab844f"}, + {file = "types_setuptools-67.8.0.0-py3-none-any.whl", hash 
= "sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff"}, ] [[package]] @@ -3426,4 +3427,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "102eed4faa13eab195555ea070f235acd1e3f0ff9cf028afcac6c51b3e409071" +content-hash = "ef3a16dd66177f7141239e1a2d3e07cc14c08f1e4e0c5127184d022bc062da52" diff --git a/pyproject.toml b/pyproject.toml index 3b38d8370c..9c77f9294a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.81.0rc1" +version = "1.84.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "Apache-2.0" @@ -311,7 +311,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.259" +ruff = "0.0.265" # Typechecking mypy = "*" @@ -368,7 +368,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"] +requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"] build-backend = "poetry.core.masonry.api" diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index d7c73c1f25..51372e1553 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -57,7 +57,7 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.master"), priority_class: 5, conditions: Cow::Borrowed(&[]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: false, }]; @@ -88,7 +88,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern: Cow::Borrowed("m.notice"), }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, @@ -122,7 +122,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern: Cow::Borrowed("m.room.member"), }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 6941c61ea4..2d7c4c06be 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -140,7 +140,7 @@ impl PushRuleEvaluator { /// name. /// /// Returns the set of actions, if any, that match (filtering out any - /// `dont_notify` actions). + /// `dont_notify` and `coalesce` actions). pub fn run( &self, push_rules: &FilteredPushRules, @@ -198,8 +198,9 @@ impl PushRuleEvaluator { let actions = push_rule .actions .iter() - // Filter out "dont_notify" actions, as we don't store them. - .filter(|a| **a != Action::DontNotify) + // Filter out "dont_notify" and "coalesce" actions, as we don't store them + // (since they result in no action by the pushers). + .filter(|a| **a != Action::DontNotify && **a != Action::Coalesce) .cloned() .collect(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 575a1c1e68..f19d3c739f 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -164,11 +164,13 @@ impl PushRule { /// The "action" Synapse should perform for a matching push rule. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum Action { - DontNotify, Notify, - Coalesce, SetTweak(SetTweak), + // Legacy actions that should be understood, but are equivalent to no-ops. + DontNotify, + Coalesce, + // An unrecognized custom action. Unknown(Value), } @@ -568,7 +570,10 @@ impl FilteredPushRules { .filter(|rule| { // Ignore disabled experimental push rules - if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") { + if !self.msc1767_enabled + && (rule.rule_id.contains("org.matrix.msc1767") + || rule.rule_id.contains("org.matrix.msc3933")) + { return false; } diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index 32fe7f50de..fee4a8bd3d 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -40,10 +40,32 @@ def main(force_colors: bool) -> None: exec(r, locals) current_schema_version = locals["SCHEMA_VERSION"] - click.secho(f"Current schema version: {current_schema_version}") - diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None) + # Get the schema version of the local file to check against current schema on develop + with open("synapse/storage/schema/__init__.py", "r") as file: + local_schema = file.read() + new_locals: Dict[str, Any] = {} + exec(local_schema, new_locals) + local_schema_version = new_locals["SCHEMA_VERSION"] + + if local_schema_version != current_schema_version: + # local schema version must be +/-1 the current schema version on develop + if abs(local_schema_version - current_schema_version) != 1: + click.secho( + "The proposed schema version has diverged more than one version from develop, please fix!", + fg="red", + bold=True, + color=force_colors, + ) + click.get_current_context().exit(1) + + # right, we've changed the schema version within the allowable tolerance so + # let's now use the local version as the canonical version + current_schema_version = local_schema_version + + click.secho(f"Current schema version: {current_schema_version}") + seen_deltas = False bad_files = [] for diff in diffs: diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 1b1761202f..cba2799f15 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -11,6 +11,11 @@ # filepath of a local Complement checkout or by setting the COMPLEMENT_REF # environment variable to pull a different branch or commit. # +# To use the 'podman' command instead 'docker', set the PODMAN environment +# variable. Example: +# +# PODMAN=1 ./complement.sh +# # By default Synapse is run in monolith mode. This can be overridden by # setting the WORKERS environment variable. # @@ -30,7 +35,6 @@ # Exit if a line returns a non-zero exit code set -e - # Helper to emit annotations that collapse portions of the log in GitHub Actions echo_if_github() { if [[ -n "$GITHUB_WORKFLOW" ]]; then @@ -100,6 +104,16 @@ done # enable buildkit for the docker builds export DOCKER_BUILDKIT=1 +# Determine whether to use the docker or podman container runtime. +if [ -n "$PODMAN" ]; then + export CONTAINER_RUNTIME=podman + export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock + export BUILDAH_FORMAT=docker + export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal +else + export CONTAINER_RUNTIME=docker +fi + # Change to the repository root cd "$(dirname $0)/.." 
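The scripts-dev/check_schema_delta.py hunk above adds a tolerance rule: the schema version in the local checkout may differ from develop's by at most one, and within that tolerance the local version becomes canonical. A minimal standalone sketch of that rule, assuming two integer inputs in place of the values the script obtains by exec()ing synapse/storage/schema/__init__.py from each tree:

def resolve_schema_version(develop_version: int, local_version: int) -> int:
    """Sketch of the +/-1 drift rule enforced by check_schema_delta.py."""
    if local_version == develop_version:
        return develop_version
    if abs(local_version - develop_version) != 1:
        # Matches the script's fatal "diverged more than one version" error.
        raise ValueError(
            "The proposed schema version has diverged more than one "
            "version from develop, please fix!"
        )
    # Within tolerance: use the local version as canonical from here on.
    return local_version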
@@ -126,16 +140,16 @@ if [ -n "$use_editable_synapse" ]; then editable_mount="$(realpath .):/editable-src:z" if [ -n "$rebuild_editable_synapse" ]; then unset skip_docker_build - elif docker inspect complement-synapse-editable &>/dev/null; then + elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then # complement-synapse-editable already exists: see if we can still use it: # - The Rust module must still be importable; it will fail to import if the Rust source has changed. # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) # First set up the module in the right place for an editable installation. - docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so - if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ - && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then + if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ + && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then skip_docker_build=1 else echo "Editable Synapse image is stale. Will rebuild." @@ -149,25 +163,25 @@ if [ -z "$skip_docker_build" ]; then # Build a special image designed for use in development with editable # installs. - docker build -t synapse-editable \ + $CONTAINER_RUNTIME build -t synapse-editable \ -f "docker/editable.Dockerfile" . - docker build -t synapse-workers-editable \ + $CONTAINER_RUNTIME build -t synapse-workers-editable \ --build-arg FROM=synapse-editable \ -f "docker/Dockerfile-workers" . - docker build -t complement-synapse-editable \ + $CONTAINER_RUNTIME build -t complement-synapse-editable \ --build-arg FROM=synapse-workers-editable \ -f "docker/complement/Dockerfile" "docker/complement" # Prepare the Rust module - docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so else # Build the base Synapse image from the local checkout echo_if_github "::group::Build Docker image: matrixdotorg/synapse" - docker build -t matrixdotorg/synapse \ + $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ -f "docker/Dockerfile" . @@ -175,12 +189,12 @@ if [ -z "$skip_docker_build" ]; then # Build the workers docker image (from the base Synapse image we just built). echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" - docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . echo_if_github "::endgroup::" # Build the unified Complement image (from the worker Synapse image we just built). 
echo_if_github "::group::Build Docker image: complement/Dockerfile" - docker build -t complement-synapse \ + $CONTAINER_RUNTIME build -t complement-synapse \ -f "docker/complement/Dockerfile" "docker/complement" echo_if_github "::endgroup::" diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 2c377533c0..8058e9c993 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -18,10 +18,11 @@ can crop up, e.g the cache descriptors. from typing import Callable, Optional, Type +from mypy.erasetype import remove_instance_last_known_values from mypy.nodes import ARG_NAMED_OPT from mypy.plugin import MethodSigContext, Plugin from mypy.typeops import bind_self -from mypy.types import CallableType, NoneType, UnionType +from mypy.types import CallableType, Instance, NoneType, UnionType class SynapsePlugin(Plugin): @@ -92,10 +93,41 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.append("on_invalidate") arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg. + # Finally we ensure the return type is a Deferred. + if ( + isinstance(signature.ret_type, Instance) + and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred" + ): + # If it is already a Deferred, nothing to do. + ret_type = signature.ret_type + else: + ret_arg = None + if isinstance(signature.ret_type, Instance): + # If a coroutine, wrap the coroutine's return type in a Deferred. + if signature.ret_type.type.fullname == "typing.Coroutine": + ret_arg = signature.ret_type.args[2] + + # If an awaitable, wrap the awaitable's final value in a Deferred. + elif signature.ret_type.type.fullname == "typing.Awaitable": + ret_arg = signature.ret_type.args[0] + + # Otherwise, wrap the return value in a Deferred. + if ret_arg is None: + ret_arg = signature.ret_type + + # This should be able to use ctx.api.named_generic_type, but that doesn't seem + # to find the correct symbol for anything more than 1 module deep. + # + # modules is not part of CheckerPluginInterface. The following is a combination + # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. 
+ sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] + ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) + signature = signature.copy_modified( arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds, + ret_type=ret_type, ) return signature diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 1dcb397ba4..27fee3d9a9 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -54,11 +54,12 @@ from synapse.logging.context import ( ) from synapse.notifier import ReplicationNotifier from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn -from synapse.storage.databases.main import PushRuleStore +from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore from synapse.storage.databases.main.account_data import AccountDataWorkerStore from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore +from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore from synapse.storage.databases.main.event_push_actions import EventPushActionsStore from synapse.storage.databases.main.events_bg_updates import ( @@ -68,6 +69,7 @@ from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore +from synapse.storage.databases.main.profile import ProfileWorkerStore from synapse.storage.databases.main.pusher import ( PusherBackgroundUpdatesStore, PusherWorkerStore, @@ -123,6 +125,7 @@ BOOLEAN_COLUMNS = { "users": ["shadow_banned", "approved"], "un_partial_stated_event_stream": ["rejection_status_changed"], "users_who_share_rooms": ["share_private"], + "per_user_experimental_features": ["enabled"], } @@ -225,8 +228,11 @@ class Store( MainStateBackgroundUpdateStore, UserDirectoryBackgroundUpdateStore, EndToEndKeyBackgroundStore, + EndToEndRoomKeyBackgroundStore, StatsStore, AccountDataWorkerStore, + FilteringWorkerStore, + ProfileWorkerStore, PushRuleStore, PusherWorkerStore, PusherBackgroundUpdatesStore, diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index 22348d2d86..fcf5b842c6 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -39,7 +39,7 @@ class AuthBlocking: self._mau_limits_reserved_threepids = ( hs.config.server.mau_limits_reserved_threepids ) - self._server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips async def check_auth_blocking( @@ -77,7 +77,7 @@ class AuthBlocking: if requester: if requester.authenticated_entity.startswith("@"): user_id = requester.authenticated_entity - elif requester.authenticated_entity == self._server_name: + elif self._is_mine_server_name(requester.authenticated_entity): # We never block the server from doing actions on behalf of # users. 
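# (is_mine_server_name centralises the "is this our own server name?"
# check that was previously an inline comparison against hs.hostname.)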
return diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 0f224b34cd..cde9a2ecef 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -215,6 +215,8 @@ class EventContentFields: FEDERATE: Final = "m.federate" # The creator of the room, as used in `m.room.create` events. + # + # This is deprecated in MSC2175. ROOM_CREATOR: Final = "creator" # Used in m.room.guest_access events. @@ -255,6 +257,7 @@ class AccountDataTypes: DIRECT: Final = "m.direct" IGNORED_USER_LIST: Final = "m.ignored_user_list" TAG: Final = "m.tag" + PUSH_RULES: Final = "m.push_rules" class HistoryVisibility: diff --git a/synapse/api/errors.py b/synapse/api/errors.py index f2d6f9ab2d..8c7c94b045 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -108,10 +108,10 @@ class Codes(str, Enum): USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL" - AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET" - AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS" - AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT" - AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED" + AS_PING_URL_NOT_SET = "M_URL_NOT_SET" + AS_PING_BAD_STATUS = "M_BAD_STATUS" + AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT" + AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED" # Attempt to send a second annotation with the same event type & annotation key # MSC2677 diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index b9f432cc23..82aeef8d19 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -128,20 +128,7 @@ USER_FILTER_SCHEMA = { "account_data": {"$ref": "#/definitions/filter"}, "room": {"$ref": "#/definitions/room_filter"}, "event_format": {"type": "string", "enum": ["client", "federation"]}, - "event_fields": { - "type": "array", - "items": { - "type": "string", - # Don't allow '\\' in event field filters. This makes matching - # events a lot easier as we can then use a negative lookbehind - # assertion to split '\.' If we allowed \\ then it would - # incorrectly split '\\.' See synapse.events.utils.serialize_event - # - # Note that because this is a regular expression, we have to escape - # each backslash in the pattern. - "pattern": r"^((?!\\\\).)*$", - }, - }, + "event_fields": {"type": "array", "items": {"type": "string"}}, }, "additionalProperties": True, # Allow new fields for forward compatibility } @@ -170,11 +157,9 @@ class Filtering: result = await self.store.get_user_filter(user_localpart, filter_id) return FilterCollection(self._hs, result) - def add_user_filter( - self, user_localpart: str, user_filter: JsonDict - ) -> Awaitable[int]: + def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]: self.check_valid_filter(user_filter) - return self.store.add_user_filter(user_localpart, user_filter) + return self.store.add_user_filter(user_id, user_filter) # TODO(paul): surely we should probably add a delete_user_filter or # replace_user_filter at some point? There's no REST API specified for diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index c397920fe5..7030b133d3 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -78,7 +78,10 @@ class RoomVersion: # MSC2209: Check 'notifications' key while verifying # m.room.power_levels auth rules. limit_notifications_power_levels: bool - # MSC2174/MSC2176: Apply updated redaction rules algorithm. + # MSC2175: No longer include the creator in m.room.create events. 
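# (Under MSC2175 the room creator is implied by the m.room.create
# event's sender rather than spelled out in a "creator" content
# property, hence "implicit".)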
+ msc2175_implicit_room_creator: bool + # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to + # content property. msc2176_redaction_rules: bool # MSC3083: Support the 'restricted' join_rule. msc3083_join_rules: bool @@ -93,17 +96,23 @@ class RoomVersion: msc2716_historical: bool # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events msc2716_redactions: bool + # MSC3389: Protect relation information from redaction. + msc3389_relation_redactions: bool # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of # knocks and restricted join rules into the same join condition. msc3787_knock_restricted_join_rule: bool # MSC3667: Enforce integer power levels msc3667_int_only_power_levels: bool + # MSC3821: Do not redact the third_party_invite content field for membership events. + msc3821_redaction_rules: bool # MSC3931: Adds a push rule condition for "room version feature flags", making # some push rules room version dependent. Note that adding a flag to this list # is not enough to mark it "supported": the push rule evaluator also needs to # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag + # MSC3989: Redact the origin field. + msc3989_redaction_rules: bool class RoomVersions: @@ -116,15 +125,19 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V2 = RoomVersion( "2", @@ -135,15 +148,19 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V3 = RoomVersion( "3", @@ -154,15 +171,19 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V4 = RoomVersion( "4", @@ -173,15 +194,19 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, 
msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V5 = RoomVersion( "5", @@ -192,15 +217,19 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V6 = RoomVersion( "6", @@ -211,15 +240,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -230,15 +263,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=True, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V7 = RoomVersion( "7", @@ -249,15 +286,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V8 = RoomVersion( "8", @@ -268,15 +309,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=False, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V9 = RoomVersion( "9", @@ -287,15 +332,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, 
msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC3787 = RoomVersion( "org.matrix.msc3787", @@ -306,15 +355,42 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, + ) + MSC3821 = RoomVersion( + "org.matrix.msc3821.opt1", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, + msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, + msc3821_redaction_rules=True, + msc3931_push_features=(), + msc3989_redaction_rules=False, ) V10 = RoomVersion( "10", @@ -325,15 +401,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC2716v4 = RoomVersion( "org.matrix.msc2716v4", @@ -344,15 +424,19 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=True, msc2716_historical=True, msc2716_redactions=True, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" @@ -364,15 +448,42 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), + msc3989_redaction_rules=False, + ) + MSC3989 = RoomVersion( + "org.matrix.msc3989", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, + 
msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, + msc3931_push_features=(), + msc3989_redaction_rules=True, ) @@ -392,6 +503,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.MSC3787, RoomVersions.V10, RoomVersions.MSC2716v4, + RoomVersions.MSC3989, ) } diff --git a/synapse/app/_base.py b/synapse/app/_base.py index f7b866978c..936b1b0430 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -21,6 +21,7 @@ import socket import sys import traceback import warnings +from textwrap import indent from typing import ( TYPE_CHECKING, Any, @@ -64,8 +65,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig from synapse.crypto import context_factory from synapse.events.presence_router import load_legacy_presence_router -from synapse.events.spamcheck import load_legacy_spam_checkers -from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseSite from synapse.logging.context import PreserveLoggingContext @@ -73,6 +72,10 @@ from synapse.logging.opentracing import init_tracer from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats +from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.types import ISynapseReactor from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries @@ -210,8 +213,12 @@ def handle_startup_exception(e: Exception) -> NoReturn: # Exceptions that occur between setting up the logging and forking or starting # the reactor are written to the logs, followed by a summary to stderr. logger.exception("Exception during startup") + + error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__)) + indented_error_string = indent(error_string, " ") + quit_with_error( - f"Error during initialisation:\n {e}\nThere may be more information in the logs." + f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs." 
) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index b05fe2c589..f9aada269a 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -64,7 +64,7 @@ from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") -class AdminCmdSlavedStore( +class AdminCmdStore( FilteringWorkerStore, ClientIpWorkerStore, DeviceWorkerStore, @@ -103,7 +103,7 @@ class AdminCmdSlavedStore( class AdminCmdServer(HomeServer): - DATASTORE_CLASS = AdminCmdSlavedStore # type: ignore + DATASTORE_CLASS = AdminCmdStore # type: ignore async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None: diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index e17ce35b8e..909ebccf78 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -102,7 +102,7 @@ from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") -class GenericWorkerSlavedStore( +class GenericWorkerStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, @@ -154,7 +154,7 @@ class GenericWorkerSlavedStore( class GenericWorkerServer(HomeServer): - DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore + DATASTORE_CLASS = GenericWorkerStore # type: ignore def _listen_http(self, listener_config: ListenerConfig) -> None: assert listener_config.http_options is not None diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 897dd3edac..09988670da 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -127,10 +127,6 @@ async def phone_stats_home( daily_sent_messages = await store.count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages - r30_results = await store.count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - r30v2_results = await store.count_r30v2_users() for name, count in r30v2_results.items(): stats["r30v2_users_" + name] = count diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 35c330a3c4..2260a8f589 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -86,6 +86,7 @@ class ApplicationService: url.rstrip("/") if isinstance(url, str) else None ) # url must not end with a slash self.hs_token = hs_token + # The full Matrix ID for this application service's sender. self.sender = sender self.namespaces = self._check_namespaces(namespaces) self.id = id @@ -212,7 +213,7 @@ class ApplicationService: True if the application service is interested in the user, False if not. 
""" return ( - # User is the appservice's sender_localpart user + # User is the appservice's configured sender_localpart user user_id == self.sender # User is in the appservice's user namespace or self.is_user_in_namespace(user_id) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 86ddb1bb28..5fb3d5083d 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -326,7 +326,7 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None await self.post_json_get_json( - uri=f"{service.url}{APP_SERVICE_UNSTABLE_PREFIX}/fi.mau.msc2659/ping", + uri=f"{service.url}{APP_SERVICE_PREFIX}/ping", post_json={"transaction_id": txn_id}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -442,8 +442,10 @@ class ApplicationServiceApi(SimpleHttpClient): return False async def claim_client_keys( - self, service: "ApplicationService", query: List[Tuple[str, str, str]] - ) -> Tuple[Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str]]]: + self, service: "ApplicationService", query: List[Tuple[str, str, str, int]] + ) -> Tuple[ + Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] + ]: """Claim one time keys from an application service. Note that any error (including a timeout) is treated as the application @@ -469,8 +471,10 @@ class ApplicationServiceApi(SimpleHttpClient): # Create the expected payload shape. body: Dict[str, Dict[str, List[str]]] = {} - for user_id, device, algorithm in query: - body.setdefault(user_id, {}).setdefault(device, []).append(algorithm) + for user_id, device, algorithm, count in query: + body.setdefault(user_id, {}).setdefault(device, []).extend( + [algorithm] * count + ) uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim" try: @@ -493,11 +497,20 @@ class ApplicationServiceApi(SimpleHttpClient): # or if some are still missing. # # TODO This places a lot of faith in the response shape being correct. - missing = [ - (user_id, device, algorithm) - for user_id, device, algorithm in query - if algorithm not in response.get(user_id, {}).get(device, []) - ] + missing = [] + for user_id, device, algorithm, count in query: + # Count the number of keys in the response for this algorithm by + # checking which key IDs start with the algorithm. This uses that + # True == 1 in Python to generate a count. + response_count = sum( + key_id.startswith(f"{algorithm}:") + for key_id in response.get(user_id, {}).get(device, {}) + ) + count -= response_count + # If the appservice responds with fewer keys than requested, then + # consider the request unfulfilled. + if count > 0: + missing.append((user_id, device, algorithm, count)) return response, missing diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 2ce60610ca..1d268a1817 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -44,6 +44,7 @@ import jinja2 import pkg_resources import yaml +from synapse.types import StrSequence from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter logger = logging.getLogger(__name__) @@ -58,7 +59,7 @@ class ConfigError(Exception): the problem lies. 
""" - def __init__(self, msg: str, path: Optional[Iterable[str]] = None): + def __init__(self, msg: str, path: Optional[StrSequence] = None): self.msg = msg self.path = path diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index b5cec132b4..fc51aed234 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -61,9 +61,10 @@ from synapse.config import ( # noqa: F401 voip, workers, ) +from synapse.types import StrSequence class ConfigError(Exception): - def __init__(self, msg: str, path: Optional[Iterable[str]] = None): + def __init__(self, msg: str, path: Optional[StrSequence] = None): self.msg = msg self.path = path diff --git a/synapse/config/_util.py b/synapse/config/_util.py index d3a4b484ab..acccca413b 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -11,16 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Iterable +from typing import Any, Dict, Type, TypeVar import jsonschema +from pydantic import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence def validate_config( - json_schema: JsonDict, config: Any, config_path: Iterable[str] + json_schema: JsonDict, config: Any, config_path: StrSequence ) -> None: """Validates a config setting against a JsonSchema definition @@ -44,7 +45,7 @@ def validate_config( def json_error_to_config_error( - e: jsonschema.ValidationError, config_path: Iterable[str] + e: jsonschema.ValidationError, config_path: StrSequence ) -> ConfigError: """Converts a json validation error to a user-readable ConfigError @@ -64,3 +65,28 @@ def json_error_to_config_error( else: path.append(str(p)) return ConfigError(e.message, path) + + +Model = TypeVar("Model", bound=BaseModel) + + +def parse_and_validate_mapping( + config: Any, + model_type: Type[Model], +) -> Dict[str, Model]: + """Parse `config` as a mapping from strings to a given `Model` type. + Args: + config: The configuration data to check + model_type: The BaseModel to validate and parse against. + Returns: + Fully validated and parsed Dict[str, Model]. + Raises: + ConfigError, if given improper input. + """ + try: + # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because + # `model_type` is a runtime variable. Pydantic is fine with this. 
+ instances = parse_obj_as(Dict[str, model_type], config) # type: ignore[valid-type] + except ValidationError as e: + raise ConfigError(str(e)) from e + return instances diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 00182090b2..c2710fdf04 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -33,6 +33,15 @@ class AppServiceConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.app_service_config_files = config.get("app_service_config_files", []) + if not isinstance(self.app_service_config_files, list) or not all( + type(x) is str for x in self.app_service_config_files + ): + raise ConfigError( + "Expected '%s' to be a list of AS config files:" + % (self.app_service_config_files), + ("app_service_config_files",), + ) + self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) @@ -40,10 +49,6 @@ def load_appservices( hostname: str, config_files: List[str] ) -> List[ApplicationService]: """Returns a list of Application Services from the config files.""" - if not isinstance(config_files, list): - # type-ignore: this function gets arbitrary json value; we do use this path. - logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable] - return [] # Dicts of value -> filename seen_as_tokens: Dict[str, str] = {} diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 7687c80ea0..d769b7f668 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,18 +84,6 @@ class ExperimentalConfig(Config): "msc3984_appservice_key_query", False ) - # MSC3706 (server-side support for partial state in /send_join responses) - # Synapse will always serve partial state responses to requests using the stable - # query parameter `omit_members`. If this flag is set, Synapse will also serve - # partial state responses to requests using the unstable query parameter - # `org.matrix.msc3706.partial_state`. - self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) - - # experimental support for faster joins over federation - # (MSC2775, MSC3706, MSC3895) - # requires a target server that can provide a partial join response (MSC3706) - self.faster_joins_enabled: bool = experimental.get("faster_joins", True) - # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) @@ -189,5 +177,18 @@ class ExperimentalConfig(Config): # MSC3967: Do not require UIA when first uploading cross signing keys self.msc3967_enabled = experimental.get("msc3967_enabled", False) - # MSC2659: Application service ping endpoint - self.msc2659_enabled = experimental.get("msc2659_enabled", False) + # MSC3981: Recurse relations + self.msc3981_recurse_relations = experimental.get( + "msc3981_recurse_relations", False + ) + + # MSC3970: Scope transaction IDs to devices + self.msc3970_enabled = experimental.get("msc3970_enabled", False) + + # MSC4009: E.164 Matrix IDs + self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) + + # MSC4010: Do not allow setting m.push_rules account data. 
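# (When enabled, client attempts to set m.push_rules via the account
# data endpoints are rejected in favour of the dedicated push rules
# API.)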
+ self.msc4010_push_rules_account_data = experimental.get( + "msc4010_push_rules_account_data", False + ) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 56db875b25..1e080133dc 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -117,9 +117,7 @@ root: # Write logs to the `buffer` handler, which will buffer them together in memory, # then write them to a file. # - # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuration for the `twisted` logger above, in - # this case.) + # Replace "buffer" with "console" to log to stderr instead. # handlers: [buffer] diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index 0d32aba70a..d7959639ee 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -19,7 +19,7 @@ from urllib import parse as urlparse import attr import pkg_resources -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence from ._base import Config, ConfigError from ._util import validate_config @@ -80,7 +80,7 @@ class OembedConfig(Config): ) def _parse_and_validate_provider( - self, providers: List[JsonDict], config_path: Iterable[str] + self, providers: List[JsonDict], config_path: StrSequence ) -> Iterable[OEmbedEndpointConfig]: # Ensure it is the proper form. validate_config( @@ -112,7 +112,7 @@ class OembedConfig(Config): api_endpoint, patterns, endpoint.get("formats") ) - def _glob_to_pattern(self, glob: str, config_path: Iterable[str]) -> Pattern: + def _glob_to_pattern(self, glob: str, config_path: StrSequence) -> Pattern: """ Convert the glob into a sane regular expression to match against. The rules followed will be slightly different for the domain portion vs. diff --git a/synapse/config/push.py b/synapse/config/push.py index 3b5378e6ea..8177ff52e2 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -42,11 +42,17 @@ class PushConfig(Config): # Now check for the one in the 'email' section and honour it, # with a warning. - push_config = config.get("email") or {} - redact_content = push_config.get("redact_content") + email_push_config = config.get("email") or {} + redact_content = email_push_config.get("redact_content") if redact_content is not None: print( "The 'email.redact_content' option is deprecated: " "please set push.include_content instead" ) self.push_include_content = not redact_content + + # Whether to apply a random delay to outbound push. 
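# parse_duration accepts either a raw number of milliseconds or a
# duration string such as "10s", and always yields milliseconds.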
+ self.push_jitter_delay_ms = None + push_jitter_delay = push_config.get("jitter_delay", None) + if push_jitter_delay: + self.push_jitter_delay_ms = self.parse_duration(push_jitter_delay) diff --git a/synapse/config/redis.py b/synapse/config/redis.py index e6a75be434..636cb450b8 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -35,3 +35,9 @@ class RedisConfig(Config): self.redis_port = redis_config.get("port", 6379) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") + + self.redis_use_tls = redis_config.get("use_tls", False) + self.redis_certificate = redis_config.get("certificate_file", None) + self.redis_private_key = redis_config.get("private_key_file", None) + self.redis_ca_file = redis_config.get("ca_file", None) + self.redis_ca_path = redis_config.get("ca_path", None) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index ecb3edbe3a..f6cfdd3e04 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -137,6 +137,10 @@ class ContentRepositoryConfig(Config): self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) + self.prevent_media_downloads_from = config.get( + "prevent_media_downloads_from", [] + ) + self.media_store_path = self.ensure_directory( config.get("media_store_path", "media_store") ) @@ -220,20 +224,20 @@ class ContentRepositoryConfig(Config): if "http" in proxy_env or "https" in proxy_env: logger.warning("".join(HTTP_PROXY_SET_WARNING)) - # we always blacklist '0.0.0.0' and '::', which are supposed to be + # we always block '0.0.0.0' and '::', which are supposed to be # unroutable addresses. - self.url_preview_ip_range_blacklist = generate_ip_set( + self.url_preview_ip_range_blocklist = generate_ip_set( config["url_preview_ip_range_blacklist"], ["0.0.0.0", "::"], config_path=("url_preview_ip_range_blacklist",), ) - self.url_preview_ip_range_whitelist = generate_ip_set( + self.url_preview_ip_range_allowlist = generate_ip_set( config.get("url_preview_ip_range_whitelist", ()), config_path=("url_preview_ip_range_whitelist",), ) - self.url_preview_url_blacklist = config.get("url_preview_url_blacklist", ()) + self.url_preview_url_blocklist = config.get("url_preview_url_blacklist", ()) self.url_preview_accept_language = config.get( "url_preview_accept_language" diff --git a/synapse/config/room.py b/synapse/config/room.py index 4a7ac00540..b6696cd129 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -75,3 +75,7 @@ class RoomConfig(Config): % preset ) # We validate the actual overrides when we try to apply them. + + # When enabled, users will forget rooms when they leave them, either via a + # leave, kick or ban. 
+ self.forget_on_leave = config.get("forget_rooms_on_leave", False) diff --git a/synapse/config/server.py b/synapse/config/server.py index 386c3194b8..b46fa51593 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -27,7 +27,7 @@ from netaddr import AddrFormatError, IPNetwork, IPSet from twisted.conch.ssh.keys import Key from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_server_name @@ -73,7 +73,7 @@ def _6to4(network: IPNetwork) -> IPNetwork: def generate_ip_set( ip_addresses: Optional[Iterable[str]], extra_addresses: Optional[Iterable[str]] = None, - config_path: Optional[Iterable[str]] = None, + config_path: Optional[StrSequence] = None, ) -> IPSet: """ Generate an IPSet from a list of IP addresses or CIDRs. @@ -115,7 +115,7 @@ def generate_ip_set( # IP ranges that are considered private / unroutable / don't make sense. -DEFAULT_IP_RANGE_BLACKLIST = [ +DEFAULT_IP_RANGE_BLOCKLIST = [ # Localhost "127.0.0.0/8", # Private networks. @@ -501,36 +501,36 @@ class ServerConfig(Config): # due to resource constraints self.admin_contact = config.get("admin_contact", None) - ip_range_blacklist = config.get( - "ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST + ip_range_blocklist = config.get( + "ip_range_blacklist", DEFAULT_IP_RANGE_BLOCKLIST ) # Attempt to create an IPSet from the given ranges - # Always blacklist 0.0.0.0, :: - self.ip_range_blacklist = generate_ip_set( - ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",) + # Always block 0.0.0.0, :: + self.ip_range_blocklist = generate_ip_set( + ip_range_blocklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",) ) - self.ip_range_whitelist = generate_ip_set( + self.ip_range_allowlist = generate_ip_set( config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",) ) # The federation_ip_range_blacklist is used for backwards-compatibility # and only applies to federation and identity servers. if "federation_ip_range_blacklist" in config: - # Always blacklist 0.0.0.0, :: - self.federation_ip_range_blacklist = generate_ip_set( + # Always block 0.0.0.0, :: + self.federation_ip_range_blocklist = generate_ip_set( config["federation_ip_range_blacklist"], ["0.0.0.0", "::"], config_path=("federation_ip_range_blacklist",), ) # 'federation_ip_range_whitelist' was never a supported configuration option. - self.federation_ip_range_whitelist = None + self.federation_ip_range_allowlist = None else: # No backwards-compatiblity requrired, as federation_ip_range_blacklist # is not given. Default to ip_range_blacklist and ip_range_whitelist. - self.federation_ip_range_blacklist = self.ip_range_blacklist - self.federation_ip_range_whitelist = self.ip_range_whitelist + self.federation_ip_range_blocklist = self.ip_range_blocklist + self.federation_ip_range_allowlist = self.ip_range_allowlist # (undocumented) option for torturing the worker-mode replication a bit, # for testing. 
The value defines the number of milliseconds to pause before diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 1dfbe27e89..d2311cc857 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -18,6 +18,7 @@ import logging from typing import Any, Dict, List, Union import attr +from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr from synapse.config._base import ( Config, @@ -25,6 +26,7 @@ from synapse.config._base import ( RoutableShardedWorkerHandlingConfig, ShardedWorkerHandlingConfig, ) +from synapse.config._util import parse_and_validate_mapping from synapse.config.server import ( DIRECT_TCP_ERROR, TCPListenerConfig, @@ -37,6 +39,19 @@ The '%s' configuration option is deprecated and will be removed in a future Synapse version. Please use ``%s: name_of_worker`` instead. """ +_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """ +Missing data for a worker to connect to main process. Please include '%s' in the +`instance_map` declared in your shared yaml configuration, or optionally(as a deprecated +solution) in every worker's yaml as various `worker_replication_*` settings as defined +in workers documentation here: +`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration` +""" +# This allows for a handy knob when it's time to change from 'master' to +# something with less 'history' +MAIN_PROCESS_INSTANCE_NAME = "master" +# Use this to adjust what the main process is known as in the yaml instance_map +MAIN_PROCESS_INSTANCE_MAP_NAME = "main" + logger = logging.getLogger(__name__) @@ -50,13 +65,43 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: return obj -@attr.s(auto_attribs=True) -class InstanceLocationConfig: +class ConfigModel(BaseModel): + """A custom version of Pydantic's BaseModel which + + - ignores unknown fields and + - does not allow fields to be overwritten after construction, + + but otherwise uses Pydantic's default behaviour. + + For now, ignore unknown fields. In the future, we could change this so that unknown + config values cause a ValidationError, provided the error messages are meaningful to + server operators. + + Subclassing in this way is recommended by + https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally + """ + + class Config: + # By default, ignore fields that we don't recognise. + extra = Extra.ignore + # By default, don't allow fields to be reassigned after parsing. 
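# (With mutation disallowed, Pydantic raises a TypeError on any attempt
# to assign to a field after construction, so parsed config objects
# behave as frozen values.)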
+ allow_mutation = False + + +class InstanceLocationConfig(ConfigModel): """The host and port to talk to an instance via HTTP replication.""" - host: str - port: int - tls: bool = False + host: StrictStr + port: StrictInt + tls: StrictBool = False + + def scheme(self) -> str: + """Hardcode a retrievable scheme based on self.tls""" + return "https" if self.tls else "http" + + def netloc(self) -> str: + """Nicely format the network location data""" + return f"{self.host}:{self.port}" @attr.s @@ -129,27 +174,15 @@ class WorkerConfig(Config): raise ConfigError("worker_log_config must be a string") self.worker_log_config = worker_log_config - # The host used to connect to the main synapse - self.worker_replication_host = config.get("worker_replication_host", None) - # The port on the main synapse for TCP replication if "worker_replication_port" in config: raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",)) - # The port on the main synapse for HTTP replication endpoint - self.worker_replication_http_port = config.get("worker_replication_http_port") - - # The tls mode on the main synapse for HTTP replication endpoint. - # For backward compatibility this defaults to False. - self.worker_replication_http_tls = config.get( - "worker_replication_http_tls", False - ) - # The shared secret used for authentication when connecting to the main synapse. self.worker_replication_secret = config.get("worker_replication_secret", None) self.worker_name = config.get("worker_name", self.worker_app) - self.instance_name = self.worker_name or "master" + self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME # FIXME: Remove this check after a suitable amount of time. self.worker_main_http_uri = config.get("worker_main_http_uri", None) @@ -183,10 +216,55 @@ class WorkerConfig(Config): ) # A map from instance name to host/port of their HTTP replication endpoint. + # Check if the main process is declared. Inject it into the map if it's not, + # based first on if a 'main' block is declared then on 'worker_replication_*' + # data. If both are available, default to instance_map. The main process + # itself doesn't need this data as it would never have to talk to itself. - instance_map = config.get("instance_map") or {} - self.instance_map = { - name: InstanceLocationConfig(**c) for name, c in instance_map.items() - } + instance_map: Dict[str, Any] = config.get("instance_map", {}) + + if instance_map and self.instance_name != MAIN_PROCESS_INSTANCE_NAME: + # The host used to connect to the main synapse + main_host = config.get("worker_replication_host", None) + + # The port on the main synapse for HTTP replication endpoint + main_port = config.get("worker_replication_http_port") + + # The tls mode on the main synapse for HTTP replication endpoint. + # For backward compatibility this defaults to False. + main_tls = config.get("worker_replication_http_tls", False) + + # For now, accept 'main' in the instance_map, but the replication system + # expects 'master'; coerce that here until it's renamed later. + if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map: + instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[ + MAIN_PROCESS_INSTANCE_MAP_NAME + ] + del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME] + + # This is the backwards compatibility bit that handles the + # worker_replication_* bits using setdefault() to not overwrite anything.
+ elif main_host is not None and main_port is not None: + instance_map.setdefault( + MAIN_PROCESS_INSTANCE_NAME, + { + "host": main_host, + "port": main_port, + "tls": main_tls, + }, + ) + + else: + # If we've gotten here, it means that the main process is not on the + # instance_map and that not enough worker_replication_* variables + # were declared in the worker's yaml. + raise ConfigError( + _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA + % MAIN_PROCESS_INSTANCE_MAP_NAME + ) + + self.instance_map: Dict[ + str, InstanceLocationConfig + ] = parse_and_validate_mapping(instance_map, InstanceLocationConfig) # Map from type of streams to source, c.f. WriterLocations. writers = config.get("stream_writers") or {} diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d710607c63..260aab3241 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -150,18 +150,19 @@ class Keyring: def __init__( self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None ): - self.clock = hs.get_clock() - if key_fetchers is None: - key_fetchers = ( - # Fetch keys from the database. - StoreKeyFetcher(hs), - # Fetch keys from a configured Perspectives server. - PerspectivesKeyFetcher(hs), - # Fetch keys from the origin server directly. - ServerKeyFetcher(hs), - ) - self._key_fetchers = key_fetchers + # Always fetch keys from the database. + mutable_key_fetchers: List[KeyFetcher] = [StoreKeyFetcher(hs)] + # Fetch keys from configured trusted key servers, if any exist. + key_servers = hs.config.key.key_servers + if key_servers: + mutable_key_fetchers.append(PerspectivesKeyFetcher(hs)) + # Finally, fetch keys from the origin server directly. + mutable_key_fetchers.append(ServerKeyFetcher(hs)) + + self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers) + else: + self._key_fetchers = key_fetchers self._fetch_keys_queue: BatchingQueue[ _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]] @@ -172,7 +173,7 @@ class Keyring: process_batch_callback=self._inner_fetch_key_requests, ) - self._hostname = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name # build a FetchKeyResult for each of our own keys, to shortcircuit the # fetcher. 
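The reworked Keyring constructor above always consults the database-backed StoreKeyFetcher first, registers a PerspectivesKeyFetcher only when trusted key servers are actually configured, and falls back to asking the origin server last. The following is a minimal, self-contained sketch of that ordered-fallback pattern; FakeFetcher and fetch_with_fallback are illustrative stand-ins, not Synapse's real classes (which route requests through a BatchingQueue):

import asyncio
from typing import Dict, Iterable, List

class FakeFetcher:
    """Illustrative stand-in for a KeyFetcher; serves whatever keys it was seeded with."""

    def __init__(self, keys: Dict[str, str]):
        self._keys = keys

    async def get_keys(self, key_ids: Iterable[str]) -> Dict[str, str]:
        wanted = set(key_ids)
        return {k: v for k, v in self._keys.items() if k in wanted}

async def fetch_with_fallback(
    fetchers: List[FakeFetcher], key_ids: Iterable[str]
) -> Dict[str, str]:
    # Try each fetcher in order (local store first, origin server last),
    # asking later fetchers only for the keys that are still missing.
    found: Dict[str, str] = {}
    missing = set(key_ids)
    for fetcher in fetchers:
        if not missing:
            break
        result = await fetcher.get_keys(missing)
        found.update(result)
        missing -= result.keys()
    return found

store = FakeFetcher({"ed25519:a": "cached"})
origin = FakeFetcher({"ed25519:a": "fresh", "ed25519:b": "fresh"})
print(asyncio.run(fetch_with_fallback([store, origin], ["ed25519:a", "ed25519:b"])))
# -> {'ed25519:a': 'cached', 'ed25519:b': 'fresh'}

Asking each later fetcher only for the still-missing keys keeps the common case cheap: most lookups are served entirely from the local store.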
@@ -276,7 +277,7 @@ class Keyring: # If we are the originating server, short-circuit the key-fetch for any keys # we already have - if verify_request.server_name == self._hostname: + if self._is_mine_server_name(verify_request.server_name): for key_id in verify_request.key_ids: if key_id in self._local_verify_keys: found_keys[key_id] = self._local_verify_keys[key_id] @@ -510,7 +511,7 @@ class StoreKeyFetcher(KeyFetcher): for key_id in queue_value.key_ids ) - res = await self.store.get_server_verify_keys(key_ids_to_fetch) + res = await self.store.get_server_keys_json(key_ids_to_fetch) keys: Dict[str, Dict[str, FetchKeyResult]] = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key @@ -522,7 +523,6 @@ class BaseV2KeyFetcher(KeyFetcher): super().__init__(hs) self.store = hs.get_datastores().main - self.config = hs.config async def process_v2_response( self, from_server: str, response_json: JsonDict, time_added_ms: int @@ -626,7 +626,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_federation_http_client() - self.key_servers = self.config.key.key_servers + self.key_servers = hs.config.key.key_servers async def _fetch_keys( self, keys_to_fetch: List[_FetchKeyRequest] @@ -721,7 +721,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): ) keys: Dict[str, Dict[str, FetchKeyResult]] = {} - added_keys: List[Tuple[str, str, FetchKeyResult]] = [] + added_keys: Dict[Tuple[str, str], FetchKeyResult] = {} time_now_ms = self.clock.time_msec() @@ -752,12 +752,30 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): # we continue to process the rest of the response continue - added_keys.extend( - (server_name, key_id, key) for key_id, key in processed_response.items() - ) + for key_id, key in processed_response.items(): + dict_key = (server_name, key_id) + if dict_key in added_keys: + already_present_key = added_keys[dict_key] + logger.warning( + "Duplicate server keys for %s (%s) from perspective %s (%r, %r)", + server_name, + key_id, + perspective_name, + already_present_key, + key, + ) + + if already_present_key.valid_until_ts > key.valid_until_ts: + # Favour the entry with the largest valid_until_ts, + # as `old_verify_keys` are also collected from this + # response. + continue + + added_keys[dict_key] = key + keys.setdefault(server_name, {}).update(processed_response) - await self.store.store_server_verify_keys( + await self.store.store_server_signature_keys( perspective_name, time_now_ms, added_keys ) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index af55874b5c..b4b43ec4d7 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -455,8 +455,11 @@ def _check_create(event: "EventBase") -> None: "room appears to have unsupported version %s" % (room_version_prop,), ) - # 1.4 If content has no creator field, reject. - if EventContentFields.ROOM_CREATOR not in event.content: + # 1.4 If content has no creator field, reject if the room version requires it. 
+ if ( + not event.room_version.msc2175_implicit_room_creator + and EventContentFields.ROOM_CREATOR not in event.content + ): raise AuthError(403, "Create event lacks a 'creator' property") @@ -491,7 +494,11 @@ def _is_membership_change_allowed( key = (EventTypes.Create, "") create = auth_events.get(key) if create and event.prev_event_ids()[0] == create.event_id: - if create.content["creator"] == event.state_key: + if room_version.msc2175_implicit_room_creator: + creator = create.sender + else: + creator = create.content[EventContentFields.ROOM_CREATOR] + if creator == event.state_key: return target_user_id = event.state_key @@ -786,7 +793,7 @@ def check_redaction( """Check whether the event sender is allowed to redact the target event. Returns: - True if the the sender is allowed to redact the target event if the + True if the sender is allowed to redact the target event if the target event was created by them. False if the sender is allowed to redact the target event with no further checks. @@ -1004,10 +1011,14 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in # that. key = (EventTypes.Create, "") create_event = auth_events.get(key) - if create_event is not None and create_event.content["creator"] == user_id: - return 100 - else: - return 0 + if create_event is not None: + if create_event.room_version.msc2175_implicit_room_creator: + creator = create_event.sender + else: + creator = create_event.content[EventContentFields.ROOM_CREATOR] + if creator == user_id: + return 100 + return 0 def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int: @@ -1043,10 +1054,15 @@ def _verify_third_party_invite( """ if "third_party_invite" not in event.content: return False - if "signed" not in event.content["third_party_invite"]: + third_party_invite = event.content["third_party_invite"] + if not isinstance(third_party_invite, collections.abc.Mapping): return False - signed = event.content["third_party_invite"]["signed"] - for key in {"mxid", "token"}: + if "signed" not in third_party_invite: + return False + signed = third_party_invite["signed"] + if not isinstance(signed, collections.abc.Mapping): + return False + for key in {"mxid", "token", "signatures"}: if key not in signed: return False @@ -1064,8 +1080,6 @@ def _verify_third_party_invite( if signed["mxid"] != event.state_key: return False - if signed["token"] != token: - return False for public_key_object in get_public_keys(invite_event): public_key = public_key_object["public_key"] @@ -1077,7 +1091,9 @@ def _verify_third_party_invite( verify_key = decode_verify_key_bytes( key_name, decode_base64(public_key) ) - verify_signed_json(signed, server, verify_key) + # verify_signed_json incorrectly states it wants a dict, it + # just needs a mapping. + verify_signed_json(signed, server, verify_key) # type: ignore[arg-type] # We got the public key from the invite, so we know that the # correct server signed the signed bundle. 
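The hardened _verify_third_party_invite above type-checks every nested object before using it and insists on a "signatures" key, rather than assuming well-formed input. A rough standalone illustration of the same structural checks follows; has_well_formed_signed_block is a hypothetical helper written for this sketch, not a Synapse function:

import collections.abc
from typing import Any

def has_well_formed_signed_block(content: Any) -> bool:
    # Mirror the shape checks: every level must be a mapping, and the
    # signed block must carry "mxid", "token" and "signatures".
    if not isinstance(content, collections.abc.Mapping):
        return False
    third_party_invite = content.get("third_party_invite")
    if not isinstance(third_party_invite, collections.abc.Mapping):
        return False
    signed = third_party_invite.get("signed")
    if not isinstance(signed, collections.abc.Mapping):
        return False
    return all(key in signed for key in ("mxid", "token", "signatures"))

assert has_well_formed_signed_block(
    {"third_party_invite": {"signed": {"mxid": "@a:b", "token": "t", "signatures": {}}}}
)
assert not has_well_formed_signed_block({"third_party_invite": "not-a-mapping"})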
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index d475fe7ae5..de7e5be42b 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -198,9 +198,16 @@ class _EventInternalMetadata: soft_failed: DictProperty[bool] = DictProperty("soft_failed") proactively_send: DictProperty[bool] = DictProperty("proactively_send") redacted: DictProperty[bool] = DictProperty("redacted") + historical: DictProperty[bool] = DictProperty("historical") + txn_id: DictProperty[str] = DictProperty("txn_id") + """The transaction ID, if it was set when the event was created.""" + token_id: DictProperty[int] = DictProperty("token_id") - historical: DictProperty[bool] = DictProperty("historical") + """The access token ID of the user who sent this event, if any.""" + + device_id: DictProperty[str] = DictProperty("device_id") + """The device ID of the user who sent this event, if any.""" # XXX: These are set by StreamWorkerStore._set_before_and_after. # I'm pretty sure that these are never persisted to the database, so shouldn't @@ -326,7 +333,6 @@ class EventBase(metaclass=abc.ABCMeta): hashes: DictProperty[Dict[str, str]] = DictProperty("hashes") origin: DictProperty[str] = DictProperty("origin") origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts") - redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None) room_id: DictProperty[str] = DictProperty("room_id") sender: DictProperty[str] = DictProperty("sender") # TODO state_key should be Optional[str]. This is generally asserted in Synapse @@ -346,6 +352,13 @@ class EventBase(metaclass=abc.ABCMeta): def membership(self) -> str: return self.content["membership"] + @property + def redacts(self) -> Optional[str]: + """MSC2176 moved the redacts field into the content.""" + if self.room_version.msc2176_redaction_rules: + return self.content.get("redacts") + return self.get("redacts") + def is_state(self) -> bool: return self.get_state_key() is not None diff --git a/synapse/events/builder.py b/synapse/events/builder.py index c82745275f..a254548c6c 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -173,7 +173,9 @@ class EventBuilder: if self.is_state(): event_dict["state_key"] = self._state_key - if self._redacts is not None: + # MSC2174 moves the redacts property to the content, it is invalid to + # provide it as a top-level property. 
+ if self._redacts is not None and not self.room_version.msc2176_redaction_rules: event_dict["redacts"] = self._redacts if self._origin_server_ts is not None: diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 9b4d692cf4..e7e8225b8e 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,6 +19,7 @@ from immutabledict import immutabledict from synapse.appservice import ApplicationService from synapse.events import EventBase +from synapse.logging.opentracing import tag_args, trace from synapse.types import JsonDict, StateMap if TYPE_CHECKING: @@ -242,6 +243,8 @@ class EventContext(UnpersistedEventContextBase): return self._state_group + @trace + @tag_args async def get_current_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> Optional[StateMap[str]]: @@ -275,6 +278,8 @@ class EventContext(UnpersistedEventContextBase): return prev_state_ids + @trace + @tag_args async def get_prev_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> StateMap[str]: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index c14c7791db..e7b7b78b84 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -22,6 +22,7 @@ from typing import ( Iterable, List, Mapping, + Match, MutableMapping, Optional, Union, @@ -46,12 +47,10 @@ if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations -# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' -# (?<!stuff) matches if the current position in the string is not preceded -# by a match for 'stuff'. -# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as -# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar" -SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.") +# Split strings on "." but not "\." (or "\\\."). +SPLIT_FIELD_REGEX = re.compile(r"\\*\.") +# Find escaped characters, e.g. those with a \ in front of them. +ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)") CANONICALJSON_MAX_INT = (2**53) - 1 CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT @@ -106,7 +105,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic "depth", "prev_events", "auth_events", - "origin", "origin_server_ts", ] @@ -114,6 +112,10 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic if not room_version.msc2176_redaction_rules: allowed_keys.extend(["prev_state", "membership"]) + # Room versions before MSC3989 kept the origin field. + if not room_version.msc3989_redaction_rules: + allowed_keys.append("origin") + event_type = event_dict["type"] new_content = {} @@ -127,6 +129,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic add_fields("membership") if room_version.msc3375_redaction_rules: add_fields(EventContentFields.AUTHORISING_USER) + if room_version.msc3821_redaction_rules: + # Preserve the signed field under third_party_invite. + third_party_invite = event_dict["content"].get("third_party_invite") + if isinstance(third_party_invite, collections.abc.Mapping): + new_content["third_party_invite"] = {} + if "signed" in third_party_invite: + new_content["third_party_invite"]["signed"] = third_party_invite[ + "signed" + ] + elif event_type == EventTypes.Create: # MSC2176 rules state that create events cannot be redacted. 
if room_version.msc2176_redaction_rules: @@ -168,6 +180,18 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER: add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE) + # Protect the rel_type and event_id fields under the m.relates_to field. + if room_version.msc3389_relation_redactions: + relates_to = event_dict["content"].get("m.relates_to") + if isinstance(relates_to, collections.abc.Mapping): + new_relates_to = {} + for field in ("rel_type", "event_id"): + if field in relates_to: + new_relates_to[field] = relates_to[field] + # Only include a non-empty relates_to field. + if new_relates_to: + new_content["m.relates_to"] = new_relates_to + allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys} allowed_fields["content"] = new_content @@ -228,6 +252,57 @@ def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None: sub_out_dict[key_to_move] = sub_dict[key_to_move] +def _escape_slash(m: Match[str]) -> str: + """ + Replacement function; replace a backslash-backslash or backslash-dot with the + second character. Leaves any other string alone. + """ + if m.group(1) in ("\\", "."): + return m.group(1) + return m.group(0) + + +def _split_field(field: str) -> List[str]: + """ + Splits strings on unescaped dots and removes escaping. + + Args: + field: A string representing a path to a field. + + Returns: + A list of nested fields to traverse. + """ + + # Convert the field and remove escaping: + # + # 1. "content.body.thing\.with\.dots" + # 2. ["content", "body", "thing\.with\.dots"] + # 3. ["content", "body", "thing.with.dots"] + + # Find all dots (and their preceding backslashes). If the dot is unescaped + then emit a new field part. + result = [] + prev_start = 0 + for match in SPLIT_FIELD_REGEX.finditer(field): + # If the match is an *even* number of characters then the dot was escaped. + if len(match.group()) % 2 == 0: + continue + + # Add a new part (up to the dot, exclusive) after escaping. + result.append( + ESCAPE_SEQUENCE_PATTERN.sub( + _escape_slash, field[prev_start : match.end() - 1] + ) + ) + prev_start = match.end() + + # Add any part of the field after the last unescaped dot. (Note that if the + field ends with an unescaped dot this correctly adds a blank string.) + result.append(ESCAPE_SEQUENCE_PATTERN.sub(_escape_slash, field[prev_start:])) + + return result + + def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: """Return a new dict with only the fields in 'dictionary' which are present in 'fields'. @@ -235,7 +310,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: If there are no event fields specified then all fields are included. The entries may include '.' characters to indicate sub-fields. So ['content.body'] will include the 'body' field of the 'content' object. - A literal '.' character in a field name may be escaped using a '\'. + A literal '.' or '\' character in a field name may be escaped using a '\'. Args: dictionary: The dictionary to read from. @@ -250,13 +325,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: # for each field, convert it: # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]] - split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields] - - # for each element of the output array of arrays: # remove escaping so we can use the right key names.
- split_fields[:] = [ - [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields - ] + split_fields = [_split_field(f) for f in fields] output: JsonDict = {} for field_array in split_fields: @@ -336,6 +405,7 @@ def serialize_event( time_now_ms: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, + msc3970_enabled: bool = False, ) -> JsonDict: """Serialize event for clients Args: e time_now_ms config: Event serialization config + msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should + include the `transaction_id` in the event's `unsigned` section. Returns: The serialized event dictionary. @@ -365,27 +437,43 @@ def serialize_event( if "redacted_because" in e.unsigned: d["unsigned"]["redacted_because"] = serialize_event( - e.unsigned["redacted_because"], time_now_ms, config=config + e.unsigned["redacted_because"], + time_now_ms, + config=config, + msc3970_enabled=msc3970_enabled, ) # If we have a txn_id saved in the internal_metadata, we should include it in the # unsigned section of the event if it was sent by the same session as the one # requesting the event. - # There is a special case for guests, because they only have one access token - # without associated access_token_id, so we always include the txn_id for events - # they sent. - txn_id = getattr(e.internal_metadata, "txn_id", None) + txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None) if txn_id is not None and config.requester is not None: - event_token_id = getattr(e.internal_metadata, "token_id", None) - if config.requester.user.to_string() == e.sender and ( - ( - event_token_id is not None - and config.requester.access_token_id is not None - and event_token_id == config.requester.access_token_id + # For the MSC3970 rules to be applied, we *need* to have the device ID in the + # event internal metadata. Since we were not recording them before, if it hasn't + # been recorded, we fall back to the old behaviour. + event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None) + if msc3970_enabled and event_device_id is not None: + if event_device_id == config.requester.device_id: + d["unsigned"]["transaction_id"] = txn_id + + else: + # The pre-MSC3970 behaviour is to only include the transaction ID if the + # event was sent from the same access token. For regular users, we can use + # the access token ID to determine this. For guests, we can't, but since + # each guest only has one access token, we can just check that the event was + # sent by the same user as the one requesting the event. + event_token_id: Optional[int] = getattr( + e.internal_metadata, "token_id", None ) - or config.requester.is_guest - ): - d["unsigned"]["transaction_id"] = txn_id + if config.requester.user.to_string() == e.sender and ( + ( + event_token_id is not None + and config.requester.access_token_id is not None + and event_token_id == config.requester.access_token_id + ) + or config.requester.is_guest + ): + d["unsigned"]["transaction_id"] = txn_id # invite_room_state and knock_room_state are a list of stripped room state events # that are meant to provide metadata about a room to an invitee/knocker. They are @@ -416,6 +504,9 @@ class EventClientSerializer: clients.
""" + def __init__(self, *, msc3970_enabled: bool = False): + self._msc3970_enabled = msc3970_enabled + def serialize_event( self, event: Union[JsonDict, EventBase], @@ -440,7 +531,9 @@ class EventClientSerializer: if not isinstance(event, EventBase): return event - serialized_event = serialize_event(event, time_now, config=config) + serialized_event = serialize_event( + event, time_now, config=config, msc3970_enabled=self._msc3970_enabled + ) # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: @@ -498,7 +591,9 @@ class EventClientSerializer: # `sender` of the edit; however MSC3925 proposes extending it to the whole # of the edit, which is what we do here. serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( - event_aggregations.replace, time_now, config=config + event_aggregations.replace, + time_now, + config=config, ) # Include any threaded replies to this event. diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 29fae716f5..b77022b406 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -49,9 +49,9 @@ class FederationBase: def __init__(self, hs: "HomeServer"): self.hs = hs - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.keyring = hs.get_keyring() - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.store = hs.get_datastores().main self._clock = hs.get_clock() self._storage_controllers = hs.get_storage_controllers() @@ -137,9 +137,9 @@ class FederationBase: ) return redacted_event - spam_check = await self.spam_checker.check_event_for_spam(pdu) + spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu) - if spam_check != self.spam_checker.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: logger.warning("Event contains spam, soft-failing %s", pdu.event_id) log_kv( { diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4cf4957a42..076b9287c6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -235,7 +235,10 @@ class FederationClient(FederationBase): ) async def claim_client_keys( - self, destination: str, content: JsonDict, timeout: Optional[int] + self, + destination: str, + query: Dict[str, Dict[str, Dict[str, int]]], + timeout: Optional[int], ) -> JsonDict: """Claims one-time keys for a device hosted on a remote server. @@ -247,6 +250,50 @@ class FederationClient(FederationBase): The JSON object from the response """ sent_queries_counter.labels("client_one_time_keys").inc() + + # Convert the query with counts into a stable and unstable query and check + # if attempting to claim more than 1 OTK. + content: Dict[str, Dict[str, str]] = {} + unstable_content: Dict[str, Dict[str, List[str]]] = {} + use_unstable = False + for user_id, one_time_keys in query.items(): + for device_id, algorithms in one_time_keys.items(): + if any(count > 1 for count in algorithms.values()): + use_unstable = True + if algorithms: + # For the stable query, choose only the first algorithm. + content.setdefault(user_id, {})[device_id] = next(iter(algorithms)) + # For the unstable query, repeat each algorithm by count, then + # splat those into chain to get a flattened list of all algorithms. + # + # Converts from {"algo1": 2, "algo2": 2} to ["algo1", "algo1", "algo2"]. 
+ unstable_content.setdefault(user_id, {})[device_id] = list( + itertools.chain( + *( + itertools.repeat(algorithm, count) + for algorithm, count in algorithms.items() + ) + ) + ) + + if use_unstable: + try: + return await self.transport_layer.claim_client_keys_unstable( + destination, unstable_content, timeout + ) + except HttpResponseException as e: + # If an error is received that is due to an unrecognised endpoint, + # fallback to the v1 endpoint. Otherwise, consider it a legitimate error + # and raise. + if not is_unknown_endpoint(e): + raise + + logger.debug( + "Couldn't claim client keys with the unstable API, falling back to the v1 API" + ) + else: + logger.debug("Skipping unstable claim client keys API") + return await self.transport_layer.claim_client_keys( destination, content, timeout ) @@ -280,15 +327,11 @@ class FederationClient(FederationBase): logger.debug("backfill transaction_data=%r", transaction_data) if not isinstance(transaction_data, dict): - # TODO we probably want an exception type specific to federation - # client validation. - raise TypeError("Backfill transaction_data is not a dict.") + raise InvalidResponseError("Backfill transaction_data is not a dict.") transaction_data_pdus = transaction_data.get("pdus") if not isinstance(transaction_data_pdus, list): - # TODO we probably want an exception type specific to federation - # client validation. - raise TypeError("transaction_data.pdus is not a list.") + raise InvalidResponseError("transaction_data.pdus is not a list.") room_version = await self.store.get_room_version(room_id) @@ -811,7 +854,7 @@ class FederationClient(FederationBase): for destination in destinations: # We don't want to ask our own server for information we don't have - if destination == self.server_name: + if self._is_mine_server_name(destination): continue try: @@ -1493,7 +1536,7 @@ class FederationClient(FederationBase): self, destinations: Iterable[str], room_id: str, event_dict: JsonDict ) -> None: for destination in destinations: - if destination == self.server_name: + if self._is_mine_server_name(destination): continue try: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 64e99292ec..f4ca70a698 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -129,8 +129,9 @@ class FederationServer(FederationBase): def __init__(self, hs: "HomeServer"): super().__init__(hs) + self.server_name = hs.hostname self.handler = hs.get_federation_handler() - self._spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._federation_event_handler = hs.get_federation_event_handler() self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() @@ -738,12 +739,10 @@ class FederationServer(FederationBase): "event": event_json, "state": [p.get_pdu_json(time_now) for p in state_events], "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events], - "org.matrix.msc3706.partial_state": caller_supports_partial_state, "members_omitted": caller_supports_partial_state, } if servers_in_room is not None: - resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room) resp["servers_in_room"] = list(servers_in_room) return resp @@ -942,7 +941,7 @@ class FederationServer(FederationBase): authorising_server = get_domain_from_id( event.content[EventContentFields.AUTHORISING_USER] ) - if authorising_server != self.server_name: + if not 
self._is_mine_server_name(authorising_server): raise SynapseError( 400, f"Cannot authorise request from resident server: {authorising_server}", @@ -1005,15 +1004,12 @@ class FederationServer(FederationBase): @trace async def on_claim_client_keys( - self, origin: str, content: JsonDict + self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool ) -> Dict[str, Any]: - query = [] - for user_id, device_keys in content.get("one_time_keys", {}).items(): - for device_id, algorithm in device_keys.items(): - query.append((user_id, device_id, algorithm)) - log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) - results = await self._e2e_keys_handler.claim_local_one_time_keys(query) + results = await self._e2e_keys_handler.claim_local_one_time_keys( + query, always_include_fallback_keys=always_include_fallback_keys + ) json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} for result in results: @@ -1129,7 +1125,7 @@ class FederationServer(FederationBase): logger.warning("event id %s: %s", pdu.event_id, e) raise FederationError("ERROR", 403, str(e), affected=pdu.event_id) - if await self._spam_checker.should_drop_federated_event(pdu): + if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu): logger.warning( "Unstaged federated event contains spam, dropping %s", pdu.event_id ) @@ -1174,7 +1170,9 @@ class FederationServer(FederationBase): origin, event = next - if await self._spam_checker.should_drop_federated_event(event): + if await self._spam_checker_module_callbacks.should_drop_federated_event( + event + ): logger.warning( "Staged federated event contains spam, dropping %s", event.event_id, diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0b7c81677e..fb448f2155 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -68,6 +68,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name # We may have multiple federation sender instances, so we need to track # their positions separately. 
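Throughout this commit, ad-hoc destination == self.server_name string comparisons are replaced by the hs.is_mine_server_name predicate, so the "is this destination us?" decision is defined in exactly one place. A minimal sketch of the idea, with StubHomeServer standing in for the real HomeServer object:

class StubHomeServer:
    """Illustrative stand-in for Synapse's HomeServer; not the real class."""

    def __init__(self, hostname: str):
        self.hostname = hostname

    def is_mine_server_name(self, server_name: str) -> bool:
        # Centralises the check the hunks above previously spelled out
        # as `destination == self.server_name` at every call site.
        return server_name == self.hostname

hs = StubHomeServer("example.org")
assert hs.is_mine_server_name("example.org")
assert not hs.is_mine_server_name("matrix.org")

Centralising the predicate means any future refinement of the comparison only has to be made once, rather than at every federation call site.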
@@ -198,7 +199,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): key: Optional[Hashable] = None, ) -> None: """As per FederationSender""" - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.info("Not sending EDU to ourselves") return diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index edc4b1768c..f3bdc5a4d2 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -362,6 +362,7 @@ class FederationSender(AbstractFederationSender): self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name self._presence_router: Optional["PresenceRouter"] = None self._transaction_manager = TransactionManager(hs) @@ -766,7 +767,7 @@ class FederationSender(AbstractFederationSender): domains = [ d for d in domains_set - if d != self.server_name + if not self.is_mine_server_name(d) and self._federation_shard_config.should_handle(self._instance_name, d) ] if not domains: @@ -832,7 +833,7 @@ class FederationSender(AbstractFederationSender): assert self.is_mine_id(state.user_id) for destination in destinations: - if destination == self.server_name: + if self.is_mine_server_name(destination): continue if not self._federation_shard_config.should_handle( self._instance_name, destination @@ -860,7 +861,7 @@ class FederationSender(AbstractFederationSender): content: content of EDU key: clobbering key for this edu """ - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.info("Not sending EDU to ourselves") return @@ -897,7 +898,7 @@ class FederationSender(AbstractFederationSender): queue.send_edu(edu) def send_device_messages(self, destination: str, immediate: bool = True) -> None: - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.warning("Not sending device update to ourselves") return @@ -919,7 +920,7 @@ class FederationSender(AbstractFederationSender): might have come back. 
""" - if destination == self.server_name: + if self.is_mine_server_name(destination): logger.warning("Not waking up ourselves") return diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index c05d598b70..1cfc4446c4 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -16,6 +16,7 @@ import logging import urllib from typing import ( + TYPE_CHECKING, Any, Callable, Collection, @@ -42,21 +43,23 @@ from synapse.api.urls import ( ) from synapse.events import EventBase, make_event_from_dict from synapse.federation.units import Transaction -from synapse.http.matrixfederationclient import ByteParser +from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser from synapse.http.types import QueryParams from synapse.types import JsonDict from synapse.util import ExceptionBundle +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) class TransportLayerClient: """Sends federation HTTP requests to other servers""" - def __init__(self, hs): - self.server_name = hs.hostname + def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() - self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled + self._is_mine_server_name = hs.is_mine_server_name async def get_room_state_ids( self, destination: str, room_id: str, event_id: str @@ -133,7 +136,7 @@ class TransportLayerClient: async def backfill( self, destination: str, room_id: str, event_tuples: Collection[str], limit: int - ) -> Optional[JsonDict]: + ) -> Optional[Union[JsonDict, list]]: """Requests `limit` previous PDUs in a given context before list of PDUs. @@ -231,7 +234,7 @@ class TransportLayerClient: transaction.transaction_id, ) - if transaction.destination == self.server_name: + if self._is_mine_server_name(transaction.destination): raise RuntimeError("Transport layer cannot send to itself!") # FIXME: This is only used by the tests. The actual json sent is @@ -359,12 +362,8 @@ class TransportLayerClient: ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) query_params: Dict[str, str] = {} - if self._faster_joins_enabled: - # lazy-load state on join - query_params["org.matrix.msc3706.partial_state"] = ( - "true" if omit_members else "false" - ) - query_params["omit_members"] = "true" if omit_members else "false" + # lazy-load state on join + query_params["omit_members"] = "true" if omit_members else "false" return await self.client.put_json( destination=destination, @@ -388,6 +387,7 @@ class TransportLayerClient: # server was just having a momentary blip, the room will be out of # sync. 
ignore_backoff=True, + parser=LegacyJsonSendParser(), ) async def send_leave_v2( @@ -445,7 +445,11 @@ class TransportLayerClient: path = _create_v1_path("/invite/%s/%s", room_id, event_id) return await self.client.put_json( - destination=destination, path=path, data=content, ignore_backoff=True + destination=destination, + path=path, + data=content, + ignore_backoff=True, + parser=LegacyJsonSendParser(), ) async def send_invite_v2( @@ -641,10 +645,10 @@ class TransportLayerClient: Response: { - "device_keys": { + "one_time_keys": { "<user_id>": { "<device_id>": { - "<algorithm>:<key_id>": "<key_base64>" + "<algorithm>:<key_id>": <OTK JSON> } } } @@ -660,7 +664,50 @@ class TransportLayerClient: path = _create_v1_path("/user/keys/claim") return await self.client.post_json( - destination=destination, path=path, data=query_content, timeout=timeout + destination=destination, + path=path, + data={"one_time_keys": query_content}, + timeout=timeout, + ) + + async def claim_client_keys_unstable( + self, destination: str, query_content: JsonDict, timeout: Optional[int] + ) -> JsonDict: + """Claim one-time keys for a list of devices hosted on a remote server. + + Request: + { + "one_time_keys": { + "<user_id>": { + "<device_id>": {"<algorithm>": <count>} + } + } + } + + Response: + { + "one_time_keys": { + "<user_id>": { + "<device_id>": { + "<algorithm>:<key_id>": <OTK JSON> + } + } + } + } + + Args: + destination: The server to query. + query_content: The user ids to query. + Returns: + A dict containing the one-time keys. + """ + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/user/keys/claim") + + return await self.client.post_json( + destination=destination, + path=path, + data={"one_time_keys": query_content}, + timeout=timeout, ) async def get_missing_events( @@ -850,9 +897,7 @@ def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, while True: val = yield if not isinstance(val, bool): - raise TypeError( - "members_omitted (formerly org.matrix.msc370c.partial_state) must be a boolean" - ) + raise TypeError("members_omitted must be a boolean") response.members_omitted = val @@ -915,27 +960,11 @@ class SendJoinParser(ByteParser[SendJoinResponse]): self._coros.append( ijson.items_coro( _members_omitted_parser(self._response), - "org.matrix.msc3706.partial_state", - use_float="True", - ) - ) - # The stable field name comes last, so it "wins" if the fields disagree - self._coros.append( - ijson.items_coro( - _members_omitted_parser(self._response), "members_omitted", use_float="True", ) ) - self._coros.append( - ijson.items_coro( - _servers_in_room_parser(self._response), - "org.matrix.msc3706.servers_in_room", - use_float="True", - ) - ) - # Again, stable field name comes last self._coros.append( ijson.items_coro( diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 753372fc54..55d2cd0a9a 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -25,6 +25,7 @@ from synapse.federation.transport.server._base import ( from synapse.federation.transport.server.federation import ( FEDERATION_SERVLET_CLASSES, FederationAccountStatusServlet, + FederationUnstableClientKeysClaimServlet, ) from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import ( @@ -298,6 +299,11 @@ def register_servlets( and not hs.config.experimental.msc3720_enabled ): continue + if ( + servletclass == FederationUnstableClientKeysClaimServlet + and not 
hs.config.experimental.msc3983_appservice_otk_claims + ): + continue servletclass( hs=hs, diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index cdaf0d5de7..b6e9c58760 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -57,6 +57,7 @@ class Authenticator: self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.store = hs.get_datastores().main self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist @@ -100,7 +101,9 @@ class Authenticator: json_request["signatures"].setdefault(origin, {})[key] = sig # if the origin_server sent a destination along it needs to match our own server_name - if destination is not None and destination != self.server_name: + if destination is not None and not self._is_mine_server_name( + destination + ): raise AuthenticationError( HTTPStatus.UNAUTHORIZED, "Destination mismatch in auth header", diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index ec5b5eeafa..3a744e25be 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from collections import Counter from typing import ( TYPE_CHECKING, Dict, @@ -439,7 +440,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): server_name: str, ): super().__init__(hs, authenticator, ratelimiter, server_name) - self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled async def on_PUT( self, @@ -452,16 +452,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): # TODO(paul): assert that event_id parsed from path actually # match those given in content - partial_state = False - # The stable query parameter wins, if it disagrees with the unstable - # parameter for some reason. - stable_param = parse_boolean_from_args(query, "omit_members", default=None) - if stable_param is not None: - partial_state = stable_param - elif self._read_msc3706_query_param: - partial_state = parse_boolean_from_args( - query, "org.matrix.msc3706.partial_state", default=False - ) + partial_state = parse_boolean_from_args(query, "omit_members", default=False) result = await self.handler.on_send_join_request( origin, content, room_id, caller_supports_partial_state=partial_state @@ -577,7 +568,43 @@ class FederationClientKeysClaimServlet(BaseFederationServerServlet): async def on_POST( self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] ) -> Tuple[int, JsonDict]: - response = await self.handler.on_claim_client_keys(origin, content) + # Generate a count for each algorithm, which is hard-coded to 1. 
+ key_query: List[Tuple[str, str, str, int]] = [] + for user_id, device_keys in content.get("one_time_keys", {}).items(): + for device_id, algorithm in device_keys.items(): + key_query.append((user_id, device_id, algorithm, 1)) + + response = await self.handler.on_claim_client_keys( + key_query, always_include_fallback_keys=False + ) + return 200, response + + +class FederationUnstableClientKeysClaimServlet(BaseFederationServerServlet): + """ + Identical to the stable endpoint (FederationClientKeysClaimServlet) except + it allows for querying for multiple OTKs at once and always includes fallback + keys in the response. + """ + + PREFIX = FEDERATION_UNSTABLE_PREFIX + PATH = "/user/keys/claim" + CATEGORY = "Federation requests" + + async def on_POST( + self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] + ) -> Tuple[int, JsonDict]: + # Generate a count for each algorithm. + key_query: List[Tuple[str, str, str, int]] = [] + for user_id, device_keys in content.get("one_time_keys", {}).items(): + for device_id, algorithms in device_keys.items(): + counts = Counter(algorithms) + for algorithm, count in counts.items(): + key_query.append((user_id, device_id, algorithm, count)) + + response = await self.handler.on_claim_client_keys( + key_query, always_include_fallback_keys=True + ) return 200, response @@ -784,6 +811,7 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationClientKeysQueryServlet, FederationUserDevicesQueryServlet, FederationClientKeysClaimServlet, + FederationUnstableClientKeysClaimServlet, FederationThirdPartyInviteExchangeServlet, On3pidBindServlet, FederationVersionServlet, diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index da887647d4..6429545c98 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -841,9 +841,9 @@ class ApplicationServicesHandler: return True async def claim_e2e_one_time_keys( - self, query: Iterable[Tuple[str, str, str]] + self, query: Iterable[Tuple[str, str, str, int]] ) -> Tuple[ - Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]], List[Tuple[str, str, str]] + Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] ]: """Claim one time keys from application services. @@ -856,7 +856,7 @@ class ApplicationServicesHandler: Returns: A tuple of: - An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes. + A map of user ID -> a map device ID -> a map of key ID -> JSON. A copy of the input which has not been fulfilled (either because they are not appservice users or the appservice does not support @@ -865,18 +865,18 @@ class ApplicationServicesHandler: services = self.store.get_app_services() # Partition the users by appservice. - query_by_appservice: Dict[str, List[Tuple[str, str, str]]] = {} + query_by_appservice: Dict[str, List[Tuple[str, str, str, int]]] = {} missing = [] - for user_id, device, algorithm in query: + for user_id, device, algorithm, count in query: if not self.store.get_if_app_services_interested_in_user(user_id): - missing.append((user_id, device, algorithm)) + missing.append((user_id, device, algorithm, count)) continue # Find the associated appservice. 
for service in services: if service.is_exclusive_user(user_id): query_by_appservice.setdefault(service.id, []).append( - (user_id, device, algorithm) + (user_id, device, algorithm, count) ) continue @@ -897,12 +897,11 @@ class ApplicationServicesHandler: ) # Patch together the results -- they are all independent (since they - # require exclusive control over the users). They get returned as a list - # and the caller combines them. - claimed_keys: List[Dict[str, Dict[str, Dict[str, JsonDict]]]] = [] + # require exclusive control over the users, which is the outermost key). + claimed_keys: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} for success, result in results: if success: - claimed_keys.append(result[0]) + claimed_keys.update(result[0]) missing.extend(result[1]) return claimed_keys, missing diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 1e89447044..d001f2fb2f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -52,7 +52,6 @@ from synapse.api.errors import ( NotFoundError, StoreError, SynapseError, - UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import ( @@ -212,7 +211,7 @@ class AuthHandler: self._password_enabled_for_login = hs.config.auth.password_enabled_for_login self._password_enabled_for_reauth = hs.config.auth.password_enabled_for_reauth self._password_localdb_enabled = hs.config.auth.password_localdb_enabled - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Ratelimiter for failed auth during UIA. Uses same ratelimit config # as per `rc_login.failed_attempts`. @@ -1419,12 +1418,6 @@ class AuthHandler: return None (user_id, password_hash) = lookupres - # If the password hash is None, the account has likely been deactivated - if not password_hash: - deactivated = await self.store.get_user_deactivated_status(user_id) - if deactivated: - raise UserDeactivatedError("This account has been deactivated") - result = await self.validate_hash(password, password_hash) if not result: logger.warning("Failed password login for user %s", user_id) @@ -1749,8 +1742,11 @@ class AuthHandler: registered. auth_provider_session_id: The session ID from the SSO IdP received during login. """ - # If the account has been deactivated, do not proceed with the login - # flow. + # If the account has been deactivated, do not proceed with the login. + # + # This gets checked again when the token is submitted but this lets us + # provide an HTML error page to the user (instead of issuing a token and + # having it error later). 
deactivated = await self.store.get_user_deactivated_status(registered_user_id) if deactivated: respond_with_html(request, 403, self._sso_account_deactivated_template) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index d31263c717..f299b89a1b 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -39,11 +39,11 @@ class DeactivateAccountHandler: self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() self._server_name = hs.hostname - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). @@ -176,6 +176,9 @@ class DeactivateAccountHandler: # Remove account data (including ignored users and push rules). await self.store.purge_account_data_for_user(user_id) + # Delete any server-side backup keys + await self.store.bulk_delete_backup_keys_and_versions_for_user(user_id) + # Let modules know the user has been deactivated. await self._third_party_rules.on_user_deactivation_status_changed( user_id, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9ded6389ac..5d12a39e26 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from http import HTTPStatus from typing import ( TYPE_CHECKING, Any, @@ -76,10 +75,14 @@ class DeviceWorkerHandler: self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.state = hs.get_state_handler() + self._appservice_handler = hs.get_application_service_handler() self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() self.server_name = hs.hostname self._msc3852_enabled = hs.config.experimental.msc3852_enabled + self._query_appservices_for_keys = ( + hs.config.experimental.msc3984_appservice_key_query + ) self.device_list_updater = DeviceListWorkerUpdater(hs) @@ -215,6 +218,16 @@ class DeviceWorkerHandler: possibly_changed = set(changed) possibly_left = set() for room_id in rooms_changed: + # Check if the forward extremities have changed. If not then we know + # the current state won't have changed, and so we can skip this room. + try: + if not await self.store.have_room_forward_extremities_changed_since( + room_id, stream_ordering + ): + continue + except errors.StoreError: + pass + current_state_ids = await self._state_storage.get_current_state_ids( room_id, await_full_state=False ) @@ -319,6 +332,30 @@ class DeviceWorkerHandler: user_id, "self_signing" ) + # Check if the application services have any results. + if self._query_appservices_for_keys: + # Query the appservice for all devices for this user. + query: Dict[str, Optional[List[str]]] = {user_id: None} + + # Query the appservices for any keys. + appservice_results = await self._appservice_handler.query_keys(query) + + # Merge results, overriding anything from the database. 
+ appservice_devices = appservice_results.get("device_keys", {}).get( + user_id, {} + ) + + # Filter the database results to only those devices that the appservice has + # *not* responded with. + devices = [d for d in devices if d["device_id"] not in appservice_devices] + # Append the appservice response by wrapping each result in another dictionary. + devices.extend( + {"device_id": device_id, "keys": device} + for device_id, device in appservice_devices.items() + ) + + # TODO Handle cross-signing keys. + return { "user_id": user_id, "stream_id": stream_id, @@ -503,8 +540,6 @@ class DeviceHandler(DeviceWorkerHandler): else: raise - await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids) - # Delete data specific to each device. Not optimised as it is not # considered as part of a critical path. for device_id in device_ids: @@ -523,6 +558,10 @@ class DeviceHandler(DeviceWorkerHandler): f"org.matrix.msc3890.local_notification_settings.{device_id}", ) + # Pushers are deleted after `delete_access_tokens_for_user` is called so that + # modules using `on_logged_out` hook can use them if needed. + await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids) + await self.notify_device_update(user_id, device_ids) async def update_device(self, user_id: str, device_id: str, content: dict) -> None: @@ -909,12 +948,8 @@ class DeviceListWorkerUpdater: def __init__(self, hs: "HomeServer"): from synapse.replication.http.devices import ( ReplicationMultiUserDevicesResyncRestServlet, - ReplicationUserDevicesResyncRestServlet, ) - self._user_device_resync_client = ( - ReplicationUserDevicesResyncRestServlet.make_client(hs) - ) self._multi_user_device_resync_client = ( ReplicationMultiUserDevicesResyncRestServlet.make_client(hs) ) @@ -936,37 +971,7 @@ class DeviceListWorkerUpdater: # Shortcut empty requests return {} - try: - return await self._multi_user_device_resync_client(user_ids=user_ids) - except SynapseError as err: - if not ( - err.code == HTTPStatus.NOT_FOUND and err.errcode == Codes.UNRECOGNIZED - ): - raise - - # Fall back to single requests - result: Dict[str, Optional[JsonDict]] = {} - for user_id in user_ids: - result[user_id] = await self._user_device_resync_client(user_id=user_id) - return result - - async def user_device_resync( - self, user_id: str, mark_failed_as_stale: bool = True - ) -> Optional[JsonDict]: - """Fetches all devices for a user and updates the device cache with them. - - Args: - user_id: The user's id whose device_list will be updated. - mark_failed_as_stale: Whether to mark the user's device list as stale - if the attempt to resync failed. - Returns: - A dict with device info as under the "devices" in the result of this - request: - https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid - None when we weren't able to fetch the device info for some reason, - e.g. due to a connection problem. 
- """ - return (await self.multi_user_device_resync([user_id]))[user_id] + return await self._multi_user_device_resync_client(user_ids=user_ids) class DeviceListUpdater(DeviceListWorkerUpdater): @@ -1119,7 +1124,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): ) if resync: - await self.user_device_resync(user_id) + await self.multi_user_device_resync([user_id]) else: # Simply update the single device, since we know that is the only # change (because of the single prev_id matching the current cache) @@ -1186,10 +1191,9 @@ class DeviceListUpdater(DeviceListWorkerUpdater): for user_id in need_resync: try: # Try to resync the current user's devices list. - result = await self.user_device_resync( - user_id=user_id, - mark_failed_as_stale=False, - ) + result = (await self.multi_user_device_resync([user_id], False))[ + user_id + ] # user_device_resync only returns a result if it managed to # successfully resync and update the database. Updating the table @@ -1248,18 +1252,6 @@ class DeviceListUpdater(DeviceListWorkerUpdater): return result - async def user_device_resync( - self, user_id: str, mark_failed_as_stale: bool = True - ) -> Optional[JsonDict]: - result, failed = await self._user_device_resync_returning_failed(user_id) - - if failed and mark_failed_as_stale: - # Mark the remote user's device list as stale so we know we need to retry - # it later. - await self.store.mark_remote_users_device_caches_as_stale((user_id,)) - - return result - async def _user_device_resync_returning_failed( self, user_id: str ) -> Tuple[Optional[JsonDict], bool]: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 00c403db49..3caf9b31cc 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -25,7 +25,9 @@ from synapse.logging.opentracing import ( log_kv, set_tag, ) -from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet +from synapse.replication.http.devices import ( + ReplicationMultiUserDevicesResyncRestServlet, +) from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id from synapse.util import json_encoder from synapse.util.stringutils import random_string @@ -71,12 +73,12 @@ class DeviceMessageHandler: # sync. We do all device list resyncing on the master instance, so if # we're on a worker we hit the device resync replication API. if hs.config.worker.worker_app is None: - self._user_device_resync = ( - hs.get_device_handler().device_list_updater.user_device_resync + self._multi_user_device_resync = ( + hs.get_device_handler().device_list_updater.multi_user_device_resync ) else: - self._user_device_resync = ( - ReplicationUserDevicesResyncRestServlet.make_client(hs) + self._multi_user_device_resync = ( + ReplicationMultiUserDevicesResyncRestServlet.make_client(hs) ) # a rate limiter for room key requests. 
The keys are @@ -198,7 +200,7 @@ class DeviceMessageHandler: await self.store.mark_remote_users_device_caches_as_stale((sender_user_id,)) # Immediately attempt a resync in the background - run_in_background(self._user_device_resync, user_id=sender_user_id) + run_in_background(self._multi_user_device_resync, user_ids=[sender_user_id]) async def send_device_message( self, diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1fb23cc9bf..1e0623c7f8 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -52,7 +52,9 @@ class DirectoryHandler: self.config = hs.config self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.require_membership = hs.config.server.require_membership_for_aliases - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) self.server_name = hs.hostname self.federation = hs.get_federation_client() @@ -60,7 +62,7 @@ class DirectoryHandler: "directory", self.on_directory_query ) - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker async def _create_association( self, @@ -145,10 +147,12 @@ class DirectoryHandler: 403, "You must be in the room to create an alias for it" ) - spam_check = await self.spam_checker.user_may_create_room_alias( - user_id, room_alias + spam_check = ( + await self._spam_checker_module_callbacks.user_may_create_room_alias( + user_id, room_alias + ) ) - if spam_check != self.spam_checker.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise AuthError( 403, "This user is not permitted to create this alias", @@ -444,7 +448,9 @@ class DirectoryHandler: """ user_id = requester.user.to_string() - spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id) + spam_check = await self._spam_checker_module_callbacks.user_may_publish_room( + user_id, room_id + ) if spam_check != NOT_SPAM: raise AuthError( 403, @@ -499,7 +505,7 @@ class DirectoryHandler: # Check if publishing is blocked by a third party module allowed_by_third_party_rules = ( await ( - self.third_party_event_rules.check_visibility_can_be_modified( + self._third_party_event_rules.check_visibility_can_be_modified( room_id, visibility ) ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 0073667470..24741b667b 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -563,7 +563,9 @@ class E2eKeysHandler: return ret async def claim_local_one_time_keys( - self, local_query: List[Tuple[str, str, str]] + self, + local_query: List[Tuple[str, str, str, int]], + always_include_fallback_keys: bool, ) -> Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]]: """Claim one time keys for local users. @@ -573,43 +575,104 @@ class E2eKeysHandler: Args: local_query: An iterable of tuples of (user ID, device ID, algorithm). + always_include_fallback_keys: True to always include fallback keys. Returns: An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes. """ + # Cap the number of OTKs that can be claimed at once to avoid abuse. 
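That cap is worth seeing in isolation: each requested count is clamped before the database query so that a single /keys/claim request cannot drain a device's pool of one-time keys. A self-contained restatement (the tuple layout and the cap of 5 come from the hunk above; the function name is ours):

from typing import List, Tuple

def cap_otk_query(
    local_query: List[Tuple[str, str, str, int]], max_per_device: int = 5
) -> List[Tuple[str, str, str, int]]:
    # Clamp each requested count so one request cannot exhaust a device's OTKs.
    return [
        (user_id, device_id, algorithm, min(count, max_per_device))
        for user_id, device_id, algorithm, count in local_query
    ]

# Example: a request for 50 signed_curve25519 OTKs is clamped to 5.
assert cap_otk_query([("@a:hs", "DEV", "signed_curve25519", 50)]) == [
    ("@a:hs", "DEV", "signed_curve25519", 5)
]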
+ local_query = [
+ (user_id, device_id, algorithm, min(count, 5))
+ for user_id, device_id, algorithm, count in local_query
+ ]
+
 otk_results, not_found = await self.store.claim_e2e_one_time_keys(local_query)
 # If the application services have not provided any keys via the C-S
 # API, query it directly for one-time keys.
 if self._query_appservices_for_otks:
+ # TODO Should this query for fallback keys of uploaded OTKs if
+ # always_include_fallback_keys is True? The MSC is ambiguous.
 (
 appservice_results,
 not_found,
 ) = await self._appservice_handler.claim_e2e_one_time_keys(not_found)
 else:
- appservice_results = []
+ appservice_results = {}
+
+ # Calculate which user ID / device ID / algorithm tuples to get fallback
+ # keys for. This can be either only missing results *or* all results
+ # (which don't already have a fallback key).
+ if always_include_fallback_keys:
+ # Build the fallback query as any part of the original query where
+ # the appservice didn't respond with a fallback key.
+ fallback_query = []
+
+ # Iterate each item in the original query and search the results
+ # from the appservice for that user ID / device ID. If it is found,
+ # check if any of the keys match the requested algorithm & are a
+ # fallback key.
+ for user_id, device_id, algorithm, _count in local_query:
+ # Check if the appservice responded for this query.
+ as_result = appservice_results.get(user_id, {}).get(device_id, {})
+ found_otk = False
+ for key_id, key_json in as_result.items():
+ if key_id.startswith(f"{algorithm}:"):
+ # An OTK or fallback key was found for this query.
+ found_otk = True
+ # A fallback key was found for this query, no need to
+ # query further.
+ if key_json.get("fallback", False):
+ break
+
+ else:
+ # No fallback key was found from appservices, query for it.
+ # Only mark the fallback key as used if no OTK was found
+ # (from either the database or appservices).
+ mark_as_used = not found_otk and not any(
+ key_id.startswith(f"{algorithm}:")
+ for key_id in otk_results.get(user_id, {})
+ .get(device_id, {})
+ .keys()
+ )
+ # Note that it doesn't make sense to request more than 1 fallback key
+ # per (user_id, device_id, algorithm).
+ fallback_query.append((user_id, device_id, algorithm, mark_as_used))
+
+ else:
+ # All fallback keys get marked as used.
+ fallback_query = [
+ # Note that it doesn't make sense to request more than 1 fallback key
+ # per (user_id, device_id, algorithm).
+ (user_id, device_id, algorithm, True)
+ for user_id, device_id, algorithm, count in not_found
+ ]
 # For each user that does not have any one-time keys available, see if
 # there is a fallback key.
- fallback_results = await self.store.claim_e2e_fallback_keys(not_found)
+ fallback_results = await self.store.claim_e2e_fallback_keys(fallback_query)
 # Return the results in order, each item from the input query should
 # only appear once in the combined list.
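The branch above is dense, so here is the same fallback-key selection logic as a standalone, runnable function; the dictionaries stand in for the store and appservice results, and the names mirror the hunk:

from typing import Dict, List, Tuple

def build_fallback_query(
    local_query: List[Tuple[str, str, str, int]],
    otk_results: Dict[str, Dict[str, Dict[str, dict]]],
    appservice_results: Dict[str, Dict[str, Dict[str, dict]]],
) -> List[Tuple[str, str, str, bool]]:
    """Decide, per (user, device, algorithm), whether to fetch a fallback key
    and whether that key should be marked as used."""
    fallback_query = []
    for user_id, device_id, algorithm, _count in local_query:
        as_result = appservice_results.get(user_id, {}).get(device_id, {})
        found_otk = False
        for key_id, key_json in as_result.items():
            if key_id.startswith(f"{algorithm}:"):
                found_otk = True
                if key_json.get("fallback", False):
                    # The appservice already supplied a fallback key; the
                    # loop's `else` clause is skipped and nothing is queried.
                    break
        else:
            # Only burn the fallback key if no OTK came from the database
            # or from the appservice.
            mark_as_used = not found_otk and not any(
                key_id.startswith(f"{algorithm}:")
                for key_id in otk_results.get(user_id, {}).get(device_id, {})
            )
            fallback_query.append((user_id, device_id, algorithm, mark_as_used))
    return fallback_query

q = [("@a:hs", "DEV", "signed_curve25519", 1)]
assert build_fallback_query(q, {}, {}) == [("@a:hs", "DEV", "signed_curve25519", True)]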
- return (otk_results, *appservice_results, fallback_results) + return (otk_results, appservice_results, fallback_results) @trace async def claim_one_time_keys( - self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int] + self, + query: Dict[str, Dict[str, Dict[str, int]]], + timeout: Optional[int], + always_include_fallback_keys: bool, ) -> JsonDict: - local_query: List[Tuple[str, str, str]] = [] - remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {} + local_query: List[Tuple[str, str, str, int]] = [] + remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {} - for user_id, one_time_keys in query.get("one_time_keys", {}).items(): + for user_id, one_time_keys in query.items(): # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): - for device_id, algorithm in one_time_keys.items(): - local_query.append((user_id, device_id, algorithm)) + for device_id, algorithms in one_time_keys.items(): + for algorithm, count in algorithms.items(): + local_query.append((user_id, device_id, algorithm, count)) else: domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = one_time_keys @@ -617,7 +680,9 @@ class E2eKeysHandler: set_tag("local_key_query", str(local_query)) set_tag("remote_key_query", str(remote_queries)) - results = await self.claim_local_one_time_keys(local_query) + results = await self.claim_local_one_time_keys( + local_query, always_include_fallback_keys + ) # A map of user ID -> device ID -> key ID -> key. json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} @@ -625,7 +690,9 @@ class E2eKeysHandler: for user_id, device_keys in result.items(): for device_id, keys in device_keys.items(): for key_id, key in keys.items(): - json_result.setdefault(user_id, {})[device_id] = {key_id: key} + json_result.setdefault(user_id, {}).setdefault( + device_id, {} + ).update({key_id: key}) # Remote failures. failures: Dict[str, JsonDict] = {} @@ -636,7 +703,7 @@ class E2eKeysHandler: device_keys = remote_queries[destination] try: remote_result = await self.federation.claim_client_keys( - destination, {"one_time_keys": device_keys}, timeout=timeout + destination, device_keys, timeout=timeout ) for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 0db0bd7304..3e37c0cbe2 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -29,7 +29,7 @@ from synapse.event_auth import ( ) from synapse.events import EventBase from synapse.events.builder import EventBuilder -from synapse.types import StateMap, StrCollection, get_domain_from_id +from synapse.types import StateMap, StrCollection if TYPE_CHECKING: from synapse.server import HomeServer @@ -47,6 +47,7 @@ class EventAuthHandler: self._store = hs.get_datastores().main self._state_storage_controller = hs.get_storage_controllers().state self._server_name = hs.hostname + self._is_mine_id = hs.is_mine_id async def check_auth_rules_from_context( self, @@ -247,7 +248,7 @@ class EventAuthHandler: if not await self.is_user_in_rooms(allowed_rooms, user_id): # If this is a remote request, the user might be in an allowed room # that we do not know about. 
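The event_auth hunk just below swaps a manual domain comparison for the `hs.is_mine_id` helper. Functionally, that check boils down to something like this (a simplified stand-in that assumes a well-formed `@local:domain` ID; the real helper parses the MXID via `UserID` rather than splitting on the first colon):

from typing import Callable

def make_is_mine_id(server_name: str) -> Callable[[str], bool]:
    # An entity ID is "ours" when everything after the first colon matches
    # this homeserver's configured server name.
    def is_mine_id(entity_id: str) -> bool:
        return entity_id.split(":", 1)[1] == server_name

    return is_mine_id

is_mine_id = make_is_mine_id("example.org")
assert is_mine_id("@alice:example.org")
assert not is_mine_id("@bob:matrix.org")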
- if get_domain_from_id(user_id) != self._server_name: + if not self._is_mine_id(user_id): for room_id in allowed_rooms: if not await self._store.is_host_joined(room_id, self._server_name): raise SynapseError( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 65461a0787..2eb28d55ac 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -141,13 +141,14 @@ class FederationHandler: self.server_name = hs.hostname self.keyring = hs.get_keyring() self.is_mine_id = hs.is_mine_id - self.spam_checker = hs.get_spam_checker() + self.is_mine_server_name = hs.is_mine_server_name + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.event_creation_handler = hs.get_event_creation_handler() self.event_builder_factory = hs.get_event_builder_factory() self._event_auth_handler = hs.get_event_auth_handler() self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self.config = hs.config - self.http_client = hs.get_proxied_blacklisted_http_client() + self.http_client = hs.get_proxied_blocklisted_http_client() self._replication = hs.get_replication_data_handler() self._federation_event_handler = hs.get_federation_event_handler() self._device_handler = hs.get_device_handler() @@ -169,7 +170,9 @@ class FederationHandler: self._room_backfill = Linearizer("room_backfill") - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) # Tracks running partial state syncs by room ID. # Partial state syncs currently only run on the main process, so it's okay to @@ -451,7 +454,7 @@ class FederationHandler: for dom in domains: # We don't want to ask our own server for information we don't have - if dom == self.server_name: + if self.is_mine_server_name(dom): continue try: @@ -1042,7 +1045,7 @@ class FederationHandler: if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - spam_check = await self.spam_checker.user_may_invite( + spam_check = await self._spam_checker_module_callbacks.user_may_invite( event.sender, event.state_key, event.room_id ) if spam_check != NOT_SPAM: @@ -1253,7 +1256,7 @@ class FederationHandler: unpersisted_context, ) = await self.event_creation_handler.create_new_client_event(builder=builder) - event_allowed, _ = await self.third_party_event_rules.check_event_allowed( + event_allowed, _ = await self._third_party_event_rules.check_event_allowed( event, unpersisted_context ) if not event_allowed: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 982c8d3b2f..9a08618da5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -70,7 +70,9 @@ from synapse.logging.opentracing import ( trace, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet +from synapse.replication.http.devices import ( + ReplicationMultiUserDevicesResyncRestServlet, +) from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ) @@ -155,10 +157,13 @@ class FederationEventHandler: self._get_room_member_handler = hs.get_room_member_handler self._federation_client = hs.get_federation_client() - self._third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + 
hs.get_module_api_callbacks().third_party_event_rules + ) self._notifier = hs.get_notifier() self._is_mine_id = hs.is_mine_id + self._is_mine_server_name = hs.is_mine_server_name self._server_name = hs.hostname self._instance_name = hs.get_instance_name() @@ -167,8 +172,8 @@ class FederationEventHandler: self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs) if hs.config.worker.worker_app: - self._user_device_resync = ( - ReplicationUserDevicesResyncRestServlet.make_client(hs) + self._multi_user_device_resync = ( + ReplicationMultiUserDevicesResyncRestServlet.make_client(hs) ) else: self._device_list_updater = hs.get_device_handler().device_list_updater @@ -684,7 +689,7 @@ class FederationEventHandler: server from invalid events (there is probably no point in trying to re-fetch invalid events from every other HS in the room.) """ - if dest == self._server_name: + if self._is_mine_server_name(dest): raise SynapseError(400, "Can't backfill from self.") events = await self._federation_client.backfill( @@ -885,6 +890,11 @@ class FederationEventHandler: # Continue on with the events that are new to us. new_events.append(event) + set_tag( + SynapseTags.RESULT_PREFIX + "new_events.length", + str(len(new_events)), + ) + # We want to sort these by depth so we process them and # tell clients about them in order. sorted_events = sorted(new_events, key=lambda x: x.depth) @@ -1487,9 +1497,11 @@ class FederationEventHandler: # Immediately attempt a resync in the background if self._config.worker.worker_app: - await self._user_device_resync(user_id=sender) + await self._multi_user_device_resync(user_ids=[sender]) else: - await self._device_list_updater.user_device_resync(sender) + await self._device_list_updater.multi_user_device_resync( + user_ids=[sender] + ) except Exception: logger.exception("Failed to resync device for %s", sender) @@ -1515,7 +1527,10 @@ class FederationEventHandler: # support it or the event is not from the room creator. room_version = await self._store.get_room_version(marker_event.room_id) create_event = await self._store.get_create_event_for_room(marker_event.room_id) - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + if not room_version.msc2175_implicit_room_creator: + room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + else: + room_creator = create_event.sender if not room_version.msc2716_historical and ( not self._config.experimental.msc2716_enabled or marker_event.sender != room_creator diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index bf0f7acf80..3031384d25 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -52,10 +52,10 @@ class IdentityHandler: # An HTTP client for contacting trusted URLs. self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. 
- self.blacklisting_http_client = SimpleHttpClient( + self._http_client = SimpleHttpClient( hs, - ip_blacklist=hs.config.server.federation_ip_range_blacklist, - ip_whitelist=hs.config.server.federation_ip_range_whitelist, + ip_blocklist=hs.config.server.federation_ip_range_blocklist, + ip_allowlist=hs.config.server.federation_ip_range_allowlist, ) self.federation_http_client = hs.get_federation_http_client() self.hs = hs @@ -197,7 +197,7 @@ class IdentityHandler: try: # Use the blacklisting http client as this call is only to identity servers # provided by a client - data = await self.blacklisting_http_client.post_json_get_json( + data = await self._http_client.post_json_get_json( bind_url, bind_data, headers=headers ) @@ -308,9 +308,7 @@ class IdentityHandler: try: # Use the blacklisting http client as this call is only to identity servers # provided by a client - await self.blacklisting_http_client.post_json_get_json( - url, content, headers - ) + await self._http_client.post_json_get_json(url, content, headers) changed = True except HttpResponseException as e: changed = False @@ -579,7 +577,7 @@ class IdentityHandler: """ # Check what hashing details are supported by this identity server try: - hash_details = await self.blacklisting_http_client.get_json( + hash_details = await self._http_client.get_json( "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), {"access_token": id_access_token}, ) @@ -646,7 +644,7 @@ class IdentityHandler: headers = {"Authorization": create_id_access_token_header(id_access_token)} try: - lookup_results = await self.blacklisting_http_client.post_json_get_json( + lookup_results = await self._http_client.post_json_get_json( "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), { "addresses": [lookup_value], @@ -752,7 +750,7 @@ class IdentityHandler: url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server) try: - data = await self.blacklisting_http_client.post_json_get_json( + data = await self._http_client.post_json_get_json( url, invite_config, {"Authorization": create_id_access_token_header(id_access_token)}, diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py new file mode 100644 index 0000000000..740bf9b3c4 --- /dev/null +++ b/synapse/handlers/jwt.py @@ -0,0 +1,105 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
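The new `synapse/handlers/jwt.py` that follows validates JWT logins with authlib. For orientation, a token minted as below (authlib is the same library the handler uses; the secret, algorithm, and claims here are illustrative and must match the homeserver's JWT config) would pass `validate_login`:

from authlib.jose import jwt

# Assumes jwt_secret="secret", jwt_algorithm="HS256" and the default "sub"
# subject claim. Add "iss"/"aud" claims only if the corresponding config
# options are set: the handler rejects an unexpected "aud".
header = {"alg": "HS256"}
payload = {"sub": "alice"}
token = jwt.encode(header, payload, "secret").decode("ascii")

# The client then logs in with:
#   POST /_matrix/client/v3/login
#   {"type": "org.matrix.login.jwt", "token": token}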
+from typing import TYPE_CHECKING + +from authlib.jose import JsonWebToken, JWTClaims +from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError + +from synapse.api.errors import Codes, LoginError +from synapse.types import JsonDict, UserID + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class JwtHandler: + def __init__(self, hs: "HomeServer"): + self.hs = hs + + self.jwt_secret = hs.config.jwt.jwt_secret + self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim + self.jwt_algorithm = hs.config.jwt.jwt_algorithm + self.jwt_issuer = hs.config.jwt.jwt_issuer + self.jwt_audiences = hs.config.jwt.jwt_audiences + + def validate_login(self, login_submission: JsonDict) -> str: + """ + Authenticates the user for the /login API + + Args: + login_submission: the whole of the login submission + (including 'type' and other relevant fields) + + Returns: + The user ID that is logging in. + + Raises: + LoginError if there was an authentication problem. + """ + token = login_submission.get("token", None) + if token is None: + raise LoginError( + 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN + ) + + jwt = JsonWebToken([self.jwt_algorithm]) + claim_options = {} + if self.jwt_issuer is not None: + claim_options["iss"] = {"value": self.jwt_issuer, "essential": True} + if self.jwt_audiences is not None: + claim_options["aud"] = {"values": self.jwt_audiences, "essential": True} + + try: + claims = jwt.decode( + token, + key=self.jwt_secret, + claims_cls=JWTClaims, + claims_options=claim_options, + ) + except BadSignatureError: + # We handle this case separately to provide a better error message + raise LoginError( + 403, + "JWT validation failed: Signature verification failed", + errcode=Codes.FORBIDDEN, + ) + except JoseError as e: + # A JWT error occurred, return some info back to the client. 
+ raise LoginError(
+ 403,
+ "JWT validation failed: %s" % (str(e),),
+ errcode=Codes.FORBIDDEN,
+ )
+
+ try:
+ claims.validate(leeway=120) # allows 2 min of clock skew
+
+ # Enforce the old behavior which is rolled out in production
+ # servers: if the JWT contains an 'aud' claim but none is
+ # configured, the login attempt will fail
+ if claims.get("aud") is not None:
+ if self.jwt_audiences is None or len(self.jwt_audiences) == 0:
+ raise InvalidClaimError("aud")
+ except JoseError as e:
+ raise LoginError(
+ 403,
+ "JWT validation failed: %s" % (str(e),),
+ errcode=Codes.FORBIDDEN,
+ )
+
+ user = claims.get(self.jwt_subject_claim, None)
+ if user is None:
+ raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN)
+
+ return UserID(user, self.hs.hostname).to_string()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 4c75433a63..0b61c2272b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -77,7 +77,6 @@ from synapse.util.metrics import measure_func
 from synapse.visibility import get_effective_room_visibility_from_state
 if TYPE_CHECKING:
- from synapse.events.third_party_rules import ThirdPartyEventRules
 from synapse.server import HomeServer
 logger = logging.getLogger(__name__)
@@ -508,9 +507,9 @@ class EventCreationHandler:
 self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
- self.spam_checker = hs.get_spam_checker()
- self.third_party_event_rules: "ThirdPartyEventRules" = (
- self.hs.get_third_party_event_rules()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self._third_party_event_rules = (
+ self.hs.get_module_api_callbacks().third_party_event_rules
 )
 self._block_events_without_consent_error = (
@@ -561,6 +560,8 @@ class EventCreationHandler:
 expiry_ms=30 * 60 * 1000,
 )
+ self._msc3970_enabled = hs.config.experimental.msc3970_enabled
+
 async def create_event(
 self,
 requester: Requester,
@@ -701,9 +702,16 @@ class EventCreationHandler:
 if require_consent and not is_exempt:
 await self.assert_accepted_privacy_policy(requester)
+ # Save the access token ID, the device ID and the transaction ID in the event
+ # internal metadata. This is useful to determine if we should echo the
+ # transaction_id in events.
+ # See `synapse.events.utils.EventClientSerializer.serialize_event`
 if requester.access_token_id is not None:
 builder.internal_metadata.token_id = requester.access_token_id
+ if requester.device_id is not None:
+ builder.internal_metadata.device_id = requester.device_id
+
 if txn_id is not None:
 builder.internal_metadata.txn_id = txn_id
@@ -897,12 +905,31 @@ class EventCreationHandler:
 Returns:
 An event if one could be found, None otherwise.
 """
+
+ if self._msc3970_enabled and requester.device_id:
+ # When MSC3970 is enabled, we look up events sent by the same device first,
+ # and fall back to the old behaviour if none were found.
+ existing_event_id = (
+ await self.store.get_event_id_from_transaction_id_and_device_id(
+ room_id,
+ requester.user.to_string(),
+ requester.device_id,
+ txn_id,
+ )
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
+
+ # Pre-MSC3970, we looked up events that were sent by the same session by
+ # using the access token ID.
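The two-step deduplication added above, device-scoped under MSC3970 with a fallback to the legacy access-token scope, can be summarised like this; the module-level dicts are toy stand-ins for the two store lookup tables:

from typing import Dict, Optional, Tuple

# Stand-ins for get_event_id_from_transaction_id_and_device_id /
# ..._and_token_id, which are database queries in Synapse.
_by_device: Dict[Tuple[str, str, str, str], str] = {}
_by_token: Dict[Tuple[str, str, int, str], str] = {}

def get_existing_event_id(
    room_id: str,
    user_id: str,
    txn_id: str,
    device_id: Optional[str],
    access_token_id: Optional[int],
    msc3970_enabled: bool,
) -> Optional[str]:
    # MSC3970: prefer matching on (room, user, device, txn_id) ...
    if msc3970_enabled and device_id:
        event_id = _by_device.get((room_id, user_id, device_id, txn_id))
        if event_id:
            return event_id
    # ... then fall back to the pre-MSC3970 (room, user, token, txn_id) scope.
    if access_token_id is not None:
        return _by_token.get((room_id, user_id, access_token_id, txn_id))
    return None

_by_device[("!r:hs", "@a:hs", "DEV", "txn1")] = "$event1"
assert get_existing_event_id("!r:hs", "@a:hs", "txn1", "DEV", None, True) == "$event1"
assert get_existing_event_id("!r:hs", "@a:hs", "txn1", None, None, True) is None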
if requester.access_token_id: - existing_event_id = await self.store.get_event_id_from_transaction_id( - room_id, - requester.user.to_string(), - requester.access_token_id, - txn_id, + existing_event_id = ( + await self.store.get_event_id_from_transaction_id_and_token_id( + room_id, + requester.user.to_string(), + requester.access_token_id, + txn_id, + ) ) if existing_event_id: return await self.store.get_event(existing_event_id) @@ -1035,8 +1062,12 @@ class EventCreationHandler: event.sender, ) - spam_check_result = await self.spam_checker.check_event_for_spam(event) - if spam_check_result != self.spam_checker.NOT_SPAM: + spam_check_result = ( + await self._spam_checker_module_callbacks.check_event_for_spam( + event + ) + ) + if spam_check_result != self._spam_checker_module_callbacks.NOT_SPAM: if isinstance(spam_check_result, tuple): try: [code, dict] = spam_check_result @@ -1282,7 +1313,7 @@ class EventCreationHandler: if requester: context.app_service = requester.app_service - res, new_content = await self.third_party_event_rules.check_event_allowed( + res, new_content = await self._third_party_event_rules.check_event_allowed( event, context ) if res is False: @@ -1909,7 +1940,12 @@ class EventCreationHandler: room_version_obj = KNOWN_ROOM_VERSIONS[room_version] create_event = await self.store.get_create_event_for_room(event.room_id) - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + if not room_version_obj.msc2175_implicit_room_creator: + room_creator = create_event.content.get( + EventContentFields.ROOM_CREATOR + ) + else: + room_creator = create_event.sender # Only check an insertion event if the room version # supports it or the event is from the room creator. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 9a81a77cbd..a9160c87e3 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -59,9 +59,9 @@ class ProfileHandler: self.max_avatar_size = hs.config.server.max_avatar_size self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes - self.server_name = hs.config.server.server_name + self._is_mine_server_name = hs.is_mine_server_name - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: target_user = UserID.from_string(user_id) @@ -170,17 +170,15 @@ class ProfileHandler: displayname_to_set = None # If the admin changes the display name of a user, the requesting user cannot send - # the join event to update the displayname in the rooms. - # This must be done by the target user himself. + # the join event to update the display name in the rooms. + # This must be done by the target user themselves. 
if by_admin: requester = create_requester( target_user, authenticated_entity=requester.authenticated_entity, ) - await self.store.set_profile_displayname( - target_user.localpart, displayname_to_set - ) + await self.store.set_profile_displayname(target_user, displayname_to_set) profile = await self.store.get_profileinfo(target_user.localpart) await self.user_directory_handler.handle_local_profile_change( @@ -272,9 +270,7 @@ class ProfileHandler: target_user, authenticated_entity=requester.authenticated_entity ) - await self.store.set_profile_avatar_url( - target_user.localpart, avatar_url_to_set - ) + await self.store.set_profile_avatar_url(target_user, avatar_url_to_set) profile = await self.store.get_profileinfo(target_user.localpart) await self.user_directory_handler.handle_local_profile_change( @@ -313,7 +309,7 @@ class ProfileHandler: else: server_name = host - if server_name == self.server_name: + if self._is_mine_server_name(server_name): media_info = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 1219672a59..7ed88a3611 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -11,14 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import attr from synapse.api.errors import SynapseError, UnrecognizedRequestError +from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.push_rule import RuleNotFoundException from synapse.synapse_rust.push import get_base_rule_ids -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -115,6 +116,17 @@ class PushRulesHandler: stream_id = self._main_store.get_max_push_rules_stream_id() self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) + async def push_rules_for_user( + self, user: UserID + ) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: + """ + Push rules aren't really account data, but get formatted as such for /sync. + """ + user_id = user.to_string() + rules_raw = await self._main_store.get_push_rules_for_user(user_id) + rules = format_push_rules_for_user(user, rules_raw) + return rules + def check_actions(actions: List[Union[str, JsonDict]]) -> None: """Check if the given actions are spec compliant. @@ -129,6 +141,8 @@ def check_actions(actions: List[Union[str, JsonDict]]) -> None: raise InvalidRuleException("No actions found") for a in actions: + # "dont_notify" and "coalesce" are legacy actions. They are allowed, but + # ignored (resulting in no action from the pusher). 
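The validation that follows is easy to restate as a self-contained, runnable version of `check_actions` (the exception class is inlined here so the sketch stands alone):

from typing import List, Union

class InvalidRuleException(Exception):
    pass

def check_actions(actions: List[Union[str, dict]]) -> None:
    # "dont_notify" and "coalesce" are accepted for backwards compatibility
    # but result in no action from the pusher.
    if not isinstance(actions, list) or not actions:
        raise InvalidRuleException("No actions found")
    for a in actions:
        if a in ("notify", "dont_notify", "coalesce"):
            continue
        if isinstance(a, dict) and "set_tweak" in a:
            continue
        raise InvalidRuleException("Unrecognised action %r" % (a,))

check_actions(["notify", {"set_tweak": "highlight", "value": True}])  # passes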
if a in ["notify", "dont_notify", "coalesce"]: pass elif isinstance(a, dict) and "set_tweak" in a: diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 05122fd5a6..49a497a860 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -15,6 +15,8 @@ import logging from typing import TYPE_CHECKING +from synapse.api.constants import ReceiptTypes +from synapse.api.errors import SynapseError from synapse.util.async_helpers import Linearizer if TYPE_CHECKING: @@ -42,19 +44,28 @@ class ReadMarkerHandler: async with self.read_marker_linearizer.queue((room_id, user_id)): existing_read_marker = await self.store.get_account_data_for_room_and_type( - user_id, room_id, "m.fully_read" + user_id, room_id, ReceiptTypes.FULLY_READ ) should_update = True + # Get event ordering, this also ensures we know about the event + event_ordering = await self.store.get_event_ordering(event_id) if existing_read_marker: - # Only update if the new marker is ahead in the stream - should_update = await self.store.is_event_after( - event_id, existing_read_marker["event_id"] - ) + try: + old_event_ordering = await self.store.get_event_ordering( + existing_read_marker["event_id"] + ) + except SynapseError: + # Old event no longer exists, assume new is ahead. This may + # happen if the old event was removed due to retention. + pass + else: + # Only update if the new marker is ahead in the stream + should_update = event_ordering > old_event_ordering if should_update: content = {"event_id": event_id} await self.account_data_handler.add_account_data_to_room( - user_id, room_id, "m.fully_read", content + user_id, room_id, ReceiptTypes.FULLY_READ, content ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c8bf2439af..c80946c2e9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -46,7 +46,7 @@ from synapse.replication.http.register import ( ReplicationRegisterServlet, ) from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import RoomAlias, UserID, create_requester +from synapse.types import GUEST_USER_ID_PATTERN, RoomAlias, UserID, create_requester from synapse.types.state import StateFilter if TYPE_CHECKING: @@ -110,7 +110,7 @@ class RegistrationHandler: self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self._server_name = hs.hostname - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker if hs.config.worker.worker_app: self._register_client = ReplicationRegisterServlet.make_client(hs) @@ -143,10 +143,15 @@ class RegistrationHandler: assigned_user_id: Optional[str] = None, inhibit_user_in_use_error: bool = False, ) -> None: - if types.contains_invalid_mxid_characters(localpart): + if types.contains_invalid_mxid_characters( + localpart, self.hs.config.experimental.msc4009_e164_mxids + ): + extra_chars = ( + "=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./" + ) raise SynapseError( 400, - "User ID can only contain characters a-z, 0-9, or '=_-./'", + f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'", Codes.INVALID_USERNAME, ) @@ -195,16 +200,12 @@ class RegistrationHandler: errcode=Codes.FORBIDDEN, ) - if guest_access_token is None: - try: - int(localpart) - raise SynapseError( - 400, - "Numeric user IDs are reserved for guest users.", - errcode=Codes.INVALID_USERNAME, - ) - except ValueError: - pass + if guest_access_token is None and 
GUEST_USER_ID_PATTERN.fullmatch(localpart): + raise SynapseError( + 400, + "Numeric user IDs are reserved for guest users.", + errcode=Codes.INVALID_USERNAME, + ) async def register_user( self, @@ -259,7 +260,7 @@ class RegistrationHandler: await self.check_registration_ratelimit(address) - result = await self.spam_checker.check_registration_for_spam( + result = await self._spam_checker_module_callbacks.check_registration_for_spam( threepid, localpart, user_agent_ips or [], diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 1d09fdf135..4824635162 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -85,6 +85,7 @@ class RelationsHandler: event_id: str, room_id: str, pagin_config: PaginationConfig, + recurse: bool, include_original_event: bool, relation_type: Optional[str] = None, event_type: Optional[str] = None, @@ -98,6 +99,7 @@ class RelationsHandler: event_id: Fetch events that relate to this event ID. room_id: The room the event belongs to. pagin_config: The pagination config rules to apply, if any. + recurse: Whether to recursively find relations. include_original_event: Whether to include the parent event. relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. @@ -132,6 +134,7 @@ class RelationsHandler: direction=pagin_config.direction, from_token=pagin_config.from_token, to_token=pagin_config.to_token, + recurse=recurse, ) events = await self._main_store.get_events_as_list( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index be120cb12f..5e1702d78a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -106,7 +106,7 @@ class RoomCreationHandler: self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.hs = hs - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self._event_auth_handler = hs.get_event_auth_handler() @@ -160,7 +160,9 @@ class RoomCreationHandler: ) self._server_notices_mxid = hs.config.servernotices.server_notices_mxid - self.third_party_event_rules = hs.get_third_party_event_rules() + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) async def upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion @@ -449,7 +451,9 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - spam_check = await self.spam_checker.user_may_create_room(user_id) + spam_check = await self._spam_checker_module_callbacks.user_may_create_room( + user_id + ) if spam_check != NOT_SPAM: raise SynapseError( 403, @@ -567,6 +571,7 @@ class RoomCreationHandler: await self._send_events_for_new_room( requester, new_room_id, + new_room_version, # we expect to override all the presets with initial_state, so this is # somewhat arbitrary. room_config={"preset": RoomCreationPreset.PRIVATE_CHAT}, @@ -739,7 +744,7 @@ class RoomCreationHandler: # Let the third party rules modify the room creation config if needed, or abort # the room creation entirely with an exception. 
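The comment above summarises the third-party event rules contract for room creation: each registered module callback may mutate the creation config in place or raise to abort. A minimal, hypothetical stand-in for that dispatch (the real `on_create_room` callbacks also receive the `Requester` and live on `hs.get_module_api_callbacks().third_party_event_rules`):

import asyncio
from typing import Any, Awaitable, Callable, Dict, List

OnCreateRoomCallback = Callable[[Dict[str, Any], bool], Awaitable[None]]

class ThirdPartyRules:
    def __init__(self) -> None:
        self._on_create_room_callbacks: List[OnCreateRoomCallback] = []

    def register(self, cb: OnCreateRoomCallback) -> None:
        self._on_create_room_callbacks.append(cb)

    async def on_create_room(self, config: Dict[str, Any], is_admin: bool) -> None:
        for cb in self._on_create_room_callbacks:
            # A callback may edit `config` in place, or raise to abort creation.
            await cb(config, is_admin)

async def force_private(config: Dict[str, Any], is_admin: bool) -> None:
    if not is_admin:
        config["visibility"] = "private"

async def demo() -> None:
    rules = ThirdPartyRules()
    rules.register(force_private)
    config = {"visibility": "public"}
    await rules.on_create_room(config, is_admin=False)
    assert config["visibility"] == "private"

asyncio.run(demo())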
- await self.third_party_event_rules.on_create_room( + await self._third_party_event_rules.on_create_room( requester, config, is_requester_admin=is_requester_admin ) @@ -760,7 +765,9 @@ class RoomCreationHandler: ) if not is_requester_admin: - spam_check = await self.spam_checker.user_may_create_room(user_id) + spam_check = await self._spam_checker_module_callbacks.user_may_create_room( + user_id + ) if spam_check != NOT_SPAM: raise SynapseError( 403, @@ -874,7 +881,7 @@ class RoomCreationHandler: # Check whether this visibility value is blocked by a third party module allowed_by_third_party_rules = ( await ( - self.third_party_event_rules.check_visibility_can_be_modified( + self._third_party_event_rules.check_visibility_can_be_modified( room_id, visibility ) ) @@ -922,6 +929,7 @@ class RoomCreationHandler: ) = await self._send_events_for_new_room( requester, room_id, + room_version, room_config=config, invite_list=invite_list, initial_state=initial_state, @@ -998,6 +1006,7 @@ class RoomCreationHandler: self, creator: Requester, room_id: str, + room_version: RoomVersion, room_config: JsonDict, invite_list: List[str], initial_state: MutableStateMap, @@ -1020,6 +1029,8 @@ class RoomCreationHandler: the user requesting the room creation room_id: room id for the room being created + room_version: + The room version of the new room. room_config: A dict of configuration options. This will be the body of a /createRoom request; see @@ -1053,14 +1064,6 @@ class RoomCreationHandler: # (as this info can't be pulled from the db) state_map: MutableStateMap[str] = {} - def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: - e = {"type": etype, "content": content} - - e.update(event_keys) - e.update(kwargs) - - return e - async def create_event( etype: str, content: JsonDict, @@ -1083,7 +1086,10 @@ class RoomCreationHandler: nonlocal depth nonlocal prev_event - event_dict = create_event_dict(etype, content, **kwargs) + # Create the event dictionary. + event_dict = {"type": etype, "content": content} + event_dict.update(event_keys) + event_dict.update(kwargs) ( new_event, @@ -1120,7 +1126,9 @@ class RoomCreationHandler: 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON ) - creation_content.update({"creator": creator_id}) + # MSC2175 removes the creator field from the create event. 
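Creator selection under MSC2175-style room versions, pulled out of the hunk below into a standalone sketch (the `CreateEvent` dataclass is a stand-in for Synapse's event type):

from dataclasses import dataclass, field
from typing import Optional

ROOM_CREATOR = "creator"  # EventContentFields.ROOM_CREATOR

@dataclass
class CreateEvent:
    sender: str
    content: dict = field(default_factory=dict)

def get_room_creator(create_event: CreateEvent, implicit_room_creator: bool) -> Optional[str]:
    # Newer (MSC2175) room versions imply the creator from the event sender;
    # older versions read it out of the create event's content.
    if implicit_room_creator:
        return create_event.sender
    return create_event.content.get(ROOM_CREATOR)

assert get_room_creator(CreateEvent("@alice:hs", {ROOM_CREATOR: "@alice:hs"}), False) == "@alice:hs"
assert get_room_creator(CreateEvent("@alice:hs"), True) == "@alice:hs"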
+ if not room_version.msc2175_implicit_room_creator: + creation_content["creator"] = creator_id creation_event, unpersisted_creation_context = await create_event( EventTypes.Create, creation_content, False ) @@ -1725,7 +1733,7 @@ class RoomShutdownHandler: self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1d8b0aee6f..af0ca5c26d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -16,7 +16,7 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple from synapse import types from synapse.api.constants import ( @@ -38,7 +38,10 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.logging import opentracing +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.types import ( JsonDict, @@ -96,8 +99,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter") self.clock = hs.get_clock() - self.spam_checker = hs.get_spam_checker() - self.third_party_event_rules = hs.get_third_party_event_rules() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker + self._third_party_event_rules = ( + hs.get_module_api_callbacks().third_party_event_rules + ) self._server_notices_mxid = self.config.servernotices.server_notices_mxid self._enable_lookup = hs.config.registration.enable_3pid_lookup self.allow_per_room_profiles = self.config.server.allow_per_room_profiles @@ -169,6 +174,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.request_ratelimiter = hs.get_request_ratelimiter() hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room) + self._msc3970_enabled = hs.config.experimental.msc3970_enabled + def _on_user_joined_room(self, event_id: str, room_id: str) -> None: """Notify the rate limiter that a room join has occurred. @@ -278,9 +285,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): """ raise NotImplementedError() - @abc.abstractmethod async def forget(self, user: UserID, room_id: str) -> None: - raise NotImplementedError() + user_id = user.to_string() + + member = await self._storage_controllers.state.get_current_state_event( + room_id=room_id, event_type=EventTypes.Member, state_key=user_id + ) + membership = member.membership if member else None + + if membership is not None and membership not in [ + Membership.LEAVE, + Membership.BAN, + ]: + raise SynapseError(400, "User %s in room %s" % (user_id, room_id)) + + # In normal case this call is only required if `membership` is not `None`. 
+ # But: After the last member has left the room, the background update
+ # `_background_remove_left_rooms` is deleting rows related to this room from
+ # the table `current_state_events` and `get_current_state_events` is `None`.
+ await self.store.forget(user_id, room_id)
 async def ratelimit_multiple_invites(
 self,
@@ -399,13 +422,30 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 # Check if we already have an event with a matching transaction ID. (We
 # do this check just before we persist an event as well, but may as well
 # do it up front for efficiency.)
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- room_id,
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
- )
+ if txn_id:
+ existing_event_id = None
+ if self._msc3970_enabled and requester.device_id:
+ # When MSC3970 is enabled, we look up events sent by the same device
+ # first, and fall back to the old behaviour if none were found.
+ existing_event_id = (
+ await self.store.get_event_id_from_transaction_id_and_device_id(
+ room_id,
+ requester.user.to_string(),
+ requester.device_id,
+ txn_id,
+ )
+ )
+
+ if requester.access_token_id and not existing_event_id:
+ existing_event_id = (
+ await self.store.get_event_id_from_transaction_id_and_token_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ )
+
 if existing_event_id:
 event_pos = await self.store.get_position_for_event(existing_event_id)
 return existing_event_id, event_pos.stream
@@ -806,7 +846,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 )
 block_invite_result = (Codes.FORBIDDEN, {})
- spam_check = await self.spam_checker.user_may_invite(
+ spam_check = await self._spam_checker_module_callbacks.user_may_invite(
 requester.user.to_string(), target_id, room_id
 )
 if spam_check != NOT_SPAM:
@@ -940,8 +980,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 # a room then they're allowed to join it.
 and not new_room
 ):
- spam_check = await self.spam_checker.user_may_join_room(
- target.to_string(), room_id, is_invited=inviter is not None
+ spam_check = (
+ await self._spam_checker_module_callbacks.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ )
 )
 if spam_check != NOT_SPAM:
 raise SynapseError(
@@ -1520,7 +1562,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 # can't just rely on the standard ratelimiting of events.
 await self._third_party_invite_limiter.ratelimit(requester)
- can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
+ can_invite = await self._third_party_event_rules.check_threepid_can_be_invited(
 medium, address, room_id
 )
 if not can_invite:
@@ -1550,11 +1592,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 )
 else:
 # Check if the spamchecker(s) allow this invite to go through.
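Across these hunks the spam-checker verdict is handled the same way: `NOT_SPAM` lets the action proceed, while any other return value is an `(errcode, extra_data)` pair that is surfaced as a 403. A compact sketch of that pattern, with a stand-in for `synapse.api.errors.SynapseError`:

from typing import Tuple, Union

NOT_SPAM = "NOT_SPAM"

class SynapseError(Exception):
    def __init__(self, code: int, msg: str, errcode: str = "M_UNKNOWN"):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode

def enforce_spam_check(spam_check: Union[str, Tuple[str, dict]]) -> None:
    if spam_check != NOT_SPAM:
        errcode, _extra = spam_check
        raise SynapseError(403, "This invite is not permitted", errcode=errcode)

enforce_spam_check(NOT_SPAM)  # no exception
try:
    enforce_spam_check(("M_FORBIDDEN", {}))
except SynapseError as e:
    assert e.code == 403 and e.errcode == "M_FORBIDDEN"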
- spam_check = await self.spam_checker.user_may_send_3pid_invite(
- inviter_userid=requester.user.to_string(),
- medium=medium,
- address=address,
- room_id=room_id,
+ spam_check = (
+ await self._spam_checker_module_callbacks.user_may_send_3pid_invite(
+ inviter_userid=requester.user.to_string(),
+ medium=medium,
+ address=address,
+ room_id=room_id,
+ )
 )
 if spam_check != NOT_SPAM:
 raise SynapseError(
@@ -2023,25 +2067,141 @@ class RoomMemberMasterHandler(RoomMemberHandler):
 """Implements RoomMemberHandler._user_left_room"""
 user_left_room(self.distributor, target, room_id)
- async def forget(self, user: UserID, room_id: str) -> None:
- user_id = user.to_string()
- member = await self._storage_controllers.state.get_current_state_event(
- room_id=room_id, event_type=EventTypes.Member, state_key=user_id
- )
- membership = member.membership if member else None
+class RoomForgetterHandler(StateDeltasHandler):
+ """Forgets rooms when they are left, when enabled in the homeserver config.
- if membership is not None and membership not in [
- Membership.LEAVE,
- Membership.BAN,
- ]:
- raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
+ For the purposes of this feature, kicks, bans and "leaves" via state resolution
+ weirdness are all considered to be leaves.
- # In normal case this call is only required if `membership` is not `None`.
- # But: After the last member had left the room, the background update
- # `_background_remove_left_rooms` is deleting rows related to this room from
- # the table `current_state_events` and `get_current_state_events` is `None`.
- await self.store.forget(user_id, room_id)
+ Derived from `StatsHandler` and `UserDirectoryHandler`.
+ """
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self._hs = hs
+ self._store = hs.get_datastores().main
+ self._storage_controllers = hs.get_storage_controllers()
+ self._clock = hs.get_clock()
+ self._notifier = hs.get_notifier()
+ self._room_member_handler = hs.get_room_member_handler()
+
+ # The current position in the current_state_delta stream
+ self.pos: Optional[int] = None
+
+ # Guard to ensure we only process deltas one at a time
+ self._is_processing = False
+
+ if hs.config.worker.run_background_tasks:
+ self._notifier.add_replication_callback(self.notify_new_event)
+
+ # We kick this off to pick up outstanding work from before the last restart.
+ self._clock.call_later(0, self.notify_new_event)
+
+ def notify_new_event(self) -> None:
+ """Called when there may be more deltas to process"""
+ if self._is_processing:
+ return
+
+ self._is_processing = True
+
+ async def process() -> None:
+ try:
+ await self._unsafe_process()
+ finally:
+ self._is_processing = False
+
+ run_as_background_process("room_forgetter.notify_new_event", process)
+
+ async def _unsafe_process(self) -> None:
+ # If self.pos is None then it means we haven't fetched it from the DB yet.
+ if self.pos is None:
+ self.pos = await self._store.get_room_forgetter_stream_pos()
+ room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+ if self.pos > room_max_stream_ordering:
+ # apparently, we've processed more events than exist in the database!
+ # this can happen if events are removed with history purge or similar.
+ logger.warning( + "Event stream ordering appears to have gone backwards (%i -> %i): " + "rewinding room forgetter processor", + self.pos, + room_max_stream_ordering, + ) + self.pos = room_max_stream_ordering + + if not self._hs.config.room.forget_on_leave: + # Update the processing position, so that if the server admin turns the + # feature on at a later date, we don't decide to forget every room that + # has ever been left in the past. + self.pos = self._store.get_room_max_stream_ordering() + await self._store.update_room_forgetter_stream_pos(self.pos) + return + + # Loop round handling deltas until we're up to date + + while True: + # Be sure to read the max stream_ordering *before* checking if there are any outstanding + # deltas, since there is otherwise a chance that we could miss updates which arrive + # after we check the deltas. + room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self.pos == room_max_stream_ordering: + break + + logger.debug( + "Processing room forgetting %s->%s", self.pos, room_max_stream_ordering + ) + ( + max_pos, + deltas, + ) = await self._storage_controllers.state.get_current_state_deltas( + self.pos, room_max_stream_ordering + ) + + logger.debug("Handling %d state deltas", len(deltas)) + await self._handle_deltas(deltas) + + self.pos = max_pos + + # Expose current event processing position to prometheus + event_processing_positions.labels("room_forgetter").set(max_pos) + + await self._store.update_room_forgetter_stream_pos(max_pos) + + async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: + """Called with the state deltas to process""" + for delta in deltas: + typ = delta["type"] + state_key = delta["state_key"] + room_id = delta["room_id"] + event_id = delta["event_id"] + prev_event_id = delta["prev_event_id"] + + if typ != EventTypes.Member: + continue + + if not self._hs.is_mine_id(state_key): + continue + + change = await self._get_key_change( + prev_event_id, + event_id, + key_name="membership", + public_value=Membership.JOIN, + ) + is_leave = change is MatchChange.now_false + + if is_leave: + try: + await self._room_member_handler.forget( + UserID.from_string(state_key), room_id + ) + except SynapseError as e: + if e.code == 400: + # The user is back in the room. 
+ pass + else: + raise def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 76e36b8a6d..e8ff1ad063 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -137,6 +137,3 @@ class RoomMemberWorkerHandler(RoomMemberHandler): await self._notify_change_client( user_id=target.to_string(), room_id=room_id, change="left" ) - - async def forget(self, target: UserID, room_id: str) -> None: - raise RuntimeError("Cannot forget rooms on workers.") diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index c28325323c..c3a51722bd 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -194,6 +194,7 @@ class SsoHandler: self._clock = hs.get_clock() self._store = hs.get_datastores().main self._server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self._registration_handler = hs.get_registration_handler() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -203,7 +204,7 @@ class SsoHandler: self._media_repo = ( hs.get_media_repository() if hs.config.media.can_load_media_repo else None ) - self._http_client = hs.get_proxied_blacklisted_http_client() + self._http_client = hs.get_proxied_blocklisted_http_client() # The following template is shown after a successful user interactive # authentication session. It tells the user they can close the window. @@ -224,6 +225,8 @@ class SsoHandler: self._consent_at_registration = hs.config.consent.user_consent_at_registration + self._e164_mxids = hs.config.experimental.msc4009_e164_mxids + def register_identity_provider(self, p: SsoIdentityProvider) -> None: p_id = p.idp_id assert p_id not in self._identity_providers @@ -710,7 +713,7 @@ class SsoHandler: # Since the localpart is provided via a potentially untrusted module, # ensure the MXID is valid before registering. 
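A sketch of what this localpart validation amounts to: historically a-z, 0-9 and '=_-./' are permitted, and MSC4009 additionally allows '+' so E.164 phone numbers can become MXIDs. The regex formulation here is ours; the real `contains_invalid_mxid_characters` lives in `synapse.types`:

import re

def contains_invalid_mxid_characters(localpart: str, allow_plus: bool = False) -> bool:
    # Character class per the error message in register.py above; note that
    # uppercase letters are not permitted in localparts.
    allowed = r"a-z0-9=_\-./" + ("+" if allow_plus else "")
    return re.fullmatch(f"[{allowed}]+", localpart) is None

assert not contains_invalid_mxid_characters("alice")
assert contains_invalid_mxid_characters("+15551234567")
assert not contains_invalid_mxid_characters("+15551234567", allow_plus=True)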
if not attributes.localpart or contains_invalid_mxid_characters( - attributes.localpart + attributes.localpart, self._e164_mxids ): raise MappingException("localpart is invalid: %s" % (attributes.localpart,)) @@ -802,7 +805,7 @@ class SsoHandler: if profile["avatar_url"] is not None: server_name = profile["avatar_url"].split("/")[-2] media_id = profile["avatar_url"].split("/")[-1] - if server_name == self._server_name: + if self._is_mine_server_name(server_name): media = await self._media_repo.store.get_local_media(media_id) if media is not None and upload_name == media["upload_name"]: logger.info("skipping saving the user avatar") @@ -943,7 +946,7 @@ class SsoHandler: localpart, ) - if contains_invalid_mxid_characters(localpart): + if contains_invalid_mxid_characters(localpart, self._e164_mxids): raise SynapseError(400, "localpart is invalid: %s" % (localpart,)) user_id = UserID(localpart, self._server_name).to_string() user_infos = await self._store.get_users_by_id_case_insensitive(user_id) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 64d298408d..c010405be6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -50,7 +50,6 @@ from synapse.logging.opentracing import ( start_active_span, trace, ) -from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary @@ -261,6 +260,7 @@ class SyncHandler: self.notifier = hs.get_notifier() self.presence_handler = hs.get_presence_handler() self._relations_handler = hs.get_relations_handler() + self._push_rules_handler = hs.get_push_rules_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -428,12 +428,6 @@ class SyncHandler: set_tag(SynapseTags.SYNC_RESULT, bool(sync_result)) return sync_result - async def push_rules_for_user(self, user: UserID) -> Dict[str, Dict[str, list]]: - user_id = user.to_string() - rules_raw = await self.store.get_push_rules_for_user(user_id) - rules = format_push_rules_for_user(user, rules_raw) - return rules - async def ephemeral_by_room( self, sync_result_builder: "SyncResultBuilder", @@ -1777,18 +1771,18 @@ class SyncHandler: if push_rules_changed: global_account_data = dict(global_account_data) - global_account_data["m.push_rules"] = await self.push_rules_for_user( - sync_config.user - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) global_account_data = dict(all_global_account_data) - global_account_data["m.push_rules"] = await self.push_rules_for_user( - sync_config.user - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 39ae44ea95..7aeae5319c 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -68,6 +68,7 @@ class FollowerTypingHandler: self.server_name = hs.config.server.server_name self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self.is_mine_server_name = hs.is_mine_server_name self.federation = None if hs.should_send_federation(): @@ 
-153,7 +154,7 @@ class FollowerTypingHandler: member.room_id ) for domain in hosts: - if domain != self.server_name: + if not self.is_mine_server_name(domain): logger.debug("sending typing update to %s", domain) self.federation.build_and_send_edu( destination=domain, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 28a92d41d6..05197edc95 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -94,7 +94,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._hs = hs # The current position in the current_state_delta stream @@ -149,7 +149,9 @@ class UserDirectoryHandler(StateDeltasHandler): # Remove any spammy users from the results. non_spammy_users = [] for user in results["results"]: - if not await self.spam_checker.check_username_for_spam(user): + if not await self._spam_checker_module_callbacks.check_username_for_spam( + user + ): non_spammy_users.append(user) results["results"] = non_spammy_users diff --git a/synapse/http/client.py b/synapse/http/client.py index b5cf8123ce..09ea93e10d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -74,8 +74,9 @@ from twisted.web.iweb import ( from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent +from synapse.http.replicationagent import ReplicationAgent from synapse.http.types import QueryParams -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags from synapse.types import ISynapseReactor from synapse.util import json_decoder @@ -116,22 +117,22 @@ RawHeaderValue = Union[ ] -def check_against_blacklist( - ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet +def _is_ip_blocked( + ip_address: IPAddress, allowlist: Optional[IPSet], blocklist: IPSet ) -> bool: """ Compares an IP address to allowed and disallowed IP sets. Args: ip_address: The IP address to check - ip_whitelist: Allowed IP addresses. - ip_blacklist: Disallowed IP addresses. + allowlist: Allowed IP addresses. + blocklist: Disallowed IP addresses. Returns: - True if the IP address is in the blacklist and not in the whitelist. + True if the IP address is in the blocklist and not in the allowlist. """ - if ip_address in ip_blacklist: - if ip_whitelist is None or ip_address not in ip_whitelist: + if ip_address in blocklist: + if allowlist is None or ip_address not in allowlist: return True return False @@ -153,27 +154,27 @@ def _make_scheduler( return _scheduler -class _IPBlacklistingResolver: +class _IPBlockingResolver: """ - A proxy for reactor.nameResolver which only produces non-blacklisted IP - addresses, preventing DNS rebinding attacks on URL preview. + A proxy for reactor.nameResolver which only produces non-blocklisted IP + addresses, preventing DNS rebinding attacks. 
""" def __init__( self, reactor: IReactorPluggableNameResolver, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, ): """ Args: reactor: The twisted reactor. - ip_whitelist: IP addresses to allow. - ip_blacklist: IP addresses to disallow. + ip_allowlist: IP addresses to allow. + ip_blocklist: IP addresses to disallow. """ self._reactor = reactor - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist def resolveHostName( self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0 @@ -190,16 +191,13 @@ class _IPBlacklistingResolver: ip_address = IPAddress(address.host) - if check_against_blacklist( - ip_address, self._ip_whitelist, self._ip_blacklist - ): + if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): logger.info( - "Dropped %s from DNS resolution to %s due to blacklist" - % (ip_address, hostname) + "Blocked %s from DNS resolution to %s" % (ip_address, hostname) ) has_bad_ip = True - # if we have a blacklisted IP, we'd like to raise an error to block the + # if we have a blocked IP, we'd like to raise an error to block the # request, but all we can really do from here is claim that there were no # valid results. if not has_bad_ip: @@ -231,24 +229,24 @@ class _IPBlacklistingResolver: # ISynapseReactor implies IReactorCore, but explicitly marking it this as an implementer # of IReactorCore seems to keep mypy-zope happier. @implementer(IReactorCore, ISynapseReactor) -class BlacklistingReactorWrapper: +class BlocklistingReactorWrapper: """ - A Reactor wrapper which will prevent DNS resolution to blacklisted IP + A Reactor wrapper which will prevent DNS resolution to blocked IP addresses, to prevent DNS rebinding. """ def __init__( self, reactor: IReactorPluggableNameResolver, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, ): self._reactor = reactor - # We need to use a DNS resolver which filters out blacklisted IP + # We need to use a DNS resolver which filters out blocked IP # addresses, to prevent DNS rebinding. - self._nameResolver = _IPBlacklistingResolver( - self._reactor, ip_whitelist, ip_blacklist + self._nameResolver = _IPBlockingResolver( + self._reactor, ip_allowlist, ip_blocklist ) def __getattr__(self, attr: str) -> Any: @@ -259,7 +257,7 @@ class BlacklistingReactorWrapper: return getattr(self._reactor, attr) -class BlacklistingAgentWrapper(Agent): +class BlocklistingAgentWrapper(Agent): """ An Agent wrapper which will prevent access to IP addresses being accessed directly (without an IP address lookup). @@ -268,18 +266,18 @@ class BlacklistingAgentWrapper(Agent): def __init__( self, agent: IAgent, - ip_blacklist: IPSet, - ip_whitelist: Optional[IPSet] = None, + ip_blocklist: IPSet, + ip_allowlist: Optional[IPSet] = None, ): """ Args: agent: The Agent to wrap. - ip_whitelist: IP addresses to allow. - ip_blacklist: IP addresses to disallow. + ip_allowlist: IP addresses to allow. + ip_blocklist: IP addresses to disallow. 
""" self._agent = agent - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist def request( self, @@ -298,13 +296,9 @@ class BlacklistingAgentWrapper(Agent): # Not an IP pass else: - if check_against_blacklist( - ip_address, self._ip_whitelist, self._ip_blacklist - ): - logger.info("Blocking access to %s due to blacklist" % (ip_address,)) - e = SynapseError( - HTTPStatus.FORBIDDEN, "IP address blocked by IP blacklist entry" - ) + if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): + logger.info("Blocking access to %s" % (ip_address,)) + e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked") return defer.fail(Failure(e)) return self._agent.request( @@ -312,35 +306,27 @@ class BlacklistingAgentWrapper(Agent): ) -class SimpleHttpClient: +class BaseHttpClient: """ A simple, no-frills HTTP client with methods that wrap up common ways of - using HTTP in Matrix + using HTTP in Matrix. Does not come with a default Agent, subclasses will need to + define their own. + + Args: + hs: The HomeServer instance to pass in + treq_args: Extra keyword arguments to be given to treq.request. """ + agent: IAgent + def __init__( self, hs: "HomeServer", treq_args: Optional[Dict[str, Any]] = None, - ip_whitelist: Optional[IPSet] = None, - ip_blacklist: Optional[IPSet] = None, - use_proxy: bool = False, ): - """ - Args: - hs - treq_args: Extra keyword arguments to be given to treq.request. - ip_blacklist: The IP addresses that are blacklisted that - we may not request. - ip_whitelist: The whitelisted IP addresses, that we can - request if it were otherwise caught in a blacklist. - use_proxy: Whether proxy settings should be discovered and used - from conventional environment variables. - """ self.hs = hs + self.reactor = hs.get_reactor() - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args or {} self.clock = hs.get_clock() @@ -356,44 +342,6 @@ class SimpleHttpClient: # reactor. self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) - if self._ip_blacklist: - # If we have an IP blacklist, we need to use a DNS resolver which - # filters out blacklisted IP addresses, to prevent DNS rebinding. - self.reactor: ISynapseReactor = BlacklistingReactorWrapper( - hs.get_reactor(), self._ip_whitelist, self._ip_blacklist - ) - else: - self.reactor = hs.get_reactor() - - # the pusher makes lots of concurrent SSL connections to sygnal, and - # tends to do so in batches, so we need to allow the pool to keep - # lots of idle connections around. - pool = HTTPConnectionPool(self.reactor) - # XXX: The justification for using the cache factor here is that larger instances - # will need both more cache and more connections. - # Still, this should probably be a separate dial - pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5) - pool.cachedConnectionTimeout = 2 * 60 - - self.agent: IAgent = ProxyAgent( - self.reactor, - hs.get_reactor(), - connectTimeout=15, - contextFactory=self.hs.get_http_client_context_factory(), - pool=pool, - use_proxy=use_proxy, - ) - - if self._ip_blacklist: - # If we have an IP blacklist, we then install the blacklisting Agent - # which prevents direct access to IP addresses, that are not caught - # by the DNS resolution. 
- self.agent = BlacklistingAgentWrapper( - self.agent, - ip_blacklist=self._ip_blacklist, - ip_whitelist=self._ip_whitelist, - ) - async def request( self, method: str, @@ -799,6 +747,201 @@ class SimpleHttpClient: ) +class SimpleHttpClient(BaseHttpClient): + """ + An HTTP client capable of crossing a proxy and respecting a block/allow list. + + This also configures a larger / longer-lasting HTTP connection pool. + + Args: + hs: The HomeServer instance to pass in + treq_args: Extra keyword arguments to be given to treq.request. + ip_blocklist: The IP addresses that we may not request. + ip_allowlist: The allowed IP addresses, which we may + request even if they would otherwise be caught by the blocklist. + use_proxy: Whether proxy settings should be discovered and used + from conventional environment variables. + """ + + def __init__( + self, + hs: "HomeServer", + treq_args: Optional[Dict[str, Any]] = None, + ip_allowlist: Optional[IPSet] = None, + ip_blocklist: Optional[IPSet] = None, + use_proxy: bool = False, + ): + super().__init__(hs, treq_args=treq_args) + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist + + if self._ip_blocklist: + # If we have an IP blocklist, we need to use a DNS resolver which + # filters out blocked IP addresses, to prevent DNS rebinding. + self.reactor: ISynapseReactor = BlocklistingReactorWrapper( + self.reactor, self._ip_allowlist, self._ip_blocklist + ) + + # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to + # do so in batches, so we need to allow the pool to keep lots of idle + # connections around. + pool = HTTPConnectionPool(self.reactor) + # XXX: The justification for using the cache factor here is that larger + # instances will need both more cache and more connections. + # Still, this should probably be a separate dial + pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5) + pool.cachedConnectionTimeout = 2 * 60 + + self.agent: IAgent = ProxyAgent( + self.reactor, + hs.get_reactor(), + connectTimeout=15, + contextFactory=self.hs.get_http_client_context_factory(), + pool=pool, + use_proxy=use_proxy, + ) + + if self._ip_blocklist: + # If we have an IP blocklist, we then install the Agent which prevents + # direct access to IP addresses that are not caught by the DNS resolution. + self.agent = BlocklistingAgentWrapper( + self.agent, + ip_blocklist=self._ip_blocklist, + ip_allowlist=self._ip_allowlist, + ) + + +class ReplicationClient(BaseHttpClient): + """Client for connecting to replication endpoints via HTTP and HTTPS. + + Attributes: + agent: The custom Twisted Agent used for constructing the connection. + """ + + def __init__( + self, + hs: "HomeServer", + ): + """ + Args: + hs: The HomeServer instance to pass in + """ + super().__init__(hs) + + # Use a pool, but a very small one. + pool = HTTPConnectionPool(self.reactor) + pool.maxPersistentPerHost = 5 + pool.cachedConnectionTimeout = 2 * 60 + + self.agent: IAgent = ReplicationAgent( + hs.get_reactor(), + hs.config.worker.instance_map, + contextFactory=hs.get_http_client_context_factory(), + pool=pool, + ) + + async def request( + self, + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: + """ + Make a request; differs from BaseHttpClient.request in that it does not use treq. + + Args: + method: HTTP method to use. + uri: URI to query. + data: Data to send in the request body, if applicable. + headers: Request headers. 
+ + Returns: + Response object, once the headers have been read. + + Raises: + RequestTimedOutError if the request times out before the headers are read + + """ + outgoing_requests_counter.labels(method).inc() + + logger.debug("Sending request %s %s", method, uri) + + with start_active_span( + "outgoing-replication-request", + tags={ + tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT, + tags.HTTP_METHOD: method, + tags.HTTP_URL: uri, + }, + finish_on_close=True, + ): + try: + body_producer = None + if data is not None: + body_producer = QuieterFileBodyProducer( + BytesIO(data), + cooperator=self._cooperator, + ) + + # Skip the fancy treq stuff, we don't need cookie handling, redirects, + # or buffered response bodies. + method_bytes = method.encode("ascii") + uri_bytes = uri.encode("ascii") + + # To preserve the logging context, the timeout is treated + # in a similar way to `defer.gatherResults`: + # * Each logging context-preserving fork is wrapped in + # `run_in_background`. In this case there is only one, + # since the timeout fork is not logging-context aware. + # * The `Deferred` that joins the forks back together is + # wrapped in `make_deferred_yieldable` to restore the + # logging context regardless of the path taken. + # (The logic/comments for this came from MatrixFederationHttpClient) + request_deferred = run_in_background( + self.agent.request, + method_bytes, + uri_bytes, + headers, + bodyProducer=body_producer, + ) + + # we use our own timeout mechanism rather than twisted's as a workaround + # for https://twistedmatrix.com/trac/ticket/9534. + # (Updated url https://github.com/twisted/twisted/issues/9534) + request_deferred = timeout_deferred( + request_deferred, + 60, + self.hs.get_reactor(), + ) + + # turn timeouts into RequestTimedOutErrors + request_deferred.addErrback(_timeout_to_request_timed_out_error) + + response = await make_deferred_yieldable(request_deferred) + + incoming_responses_counter.labels(method, response.code).inc() + logger.info( + "Received response to %s %s: %s", + method, + uri, + response.code, + ) + return response + except Exception as e: + incoming_responses_counter.labels(method, "ERR").inc() + logger.info( + "Error sending request to %s %s: %s %s", + method, + uri, + type(e).__name__, + e.args[0], + ) + set_tag(tags.ERROR, True) + set_tag("error_reason", e.args[0]) + raise + + def _timeout_to_request_timed_out_error(f: Failure) -> Failure: if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): # The TCP connection has its own timeout (set by the 'connectTimeout' param diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 8d7d0a3875..7e8cf31682 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -36,7 +36,7 @@ from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResp from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http import proxyagent -from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper +from synapse.http.client import BlocklistingAgentWrapper, BlocklistingReactorWrapper from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver @@ -65,12 +65,12 @@ class MatrixFederationAgent: user_agent: The user agent header to use for federation requests. 
- ip_whitelist: Allowed IP addresses. + ip_allowlist: Allowed IP addresses. - ip_blacklist: Disallowed IP addresses. + ip_blocklist: Disallowed IP addresses. proxy_reactor: twisted reactor to use for connections to the proxy server - reactor might have some blacklisting applied (i.e. for DNS queries), + reactor might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. _srv_resolver: @@ -87,17 +87,17 @@ class MatrixFederationAgent: reactor: ISynapseReactor, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, _srv_resolver: Optional[SrvResolver] = None, _well_known_resolver: Optional[WellKnownResolver] = None, ): - # proxy_reactor is not blacklisted + # proxy_reactor is not the blocklisting reactor proxy_reactor = reactor - # We need to use a DNS resolver which filters out blacklisted IP + # We need to use a DNS resolver which filters out blocked IP # addresses, to prevent DNS rebinding. - reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist) + reactor = BlocklistingReactorWrapper(reactor, ip_allowlist, ip_blocklist) self._clock = Clock(reactor) self._pool = HTTPConnectionPool(reactor) @@ -120,7 +120,7 @@ class MatrixFederationAgent: if _well_known_resolver is None: _well_known_resolver = WellKnownResolver( reactor, - agent=BlacklistingAgentWrapper( + agent=BlocklistingAgentWrapper( ProxyAgent( reactor, proxy_reactor, @@ -128,7 +128,7 @@ class MatrixFederationAgent: contextFactory=tls_client_options_factory, use_proxy=True, ), - ip_blacklist=ip_blacklist, + ip_blocklist=ip_blocklist, ), user_agent=self.user_agent, ) @@ -256,7 +256,7 @@ class MatrixHostnameEndpoint: Args: reactor: twisted reactor to use for underlying requests proxy_reactor: twisted reactor to use for connections to the proxy server. - 'reactor' might have some blacklisting applied (i.e. for DNS queries), + 'reactor' might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. tls_client_options_factory: factory to use for fetching client tls options, or none to disable TLS. diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index de0e882b33..285baddeb7 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -22,7 +22,7 @@ import attr from twisted.internet.error import ConnectError from twisted.names import client, dns -from twisted.names.error import DNSNameError, DomainError +from twisted.names.error import DNSNameError, DNSNotImplementedError, DomainError from synapse.logging.context import make_deferred_yieldable @@ -145,6 +145,9 @@ class SrvResolver: # TODO: cache this. We can get the SOA out of the exception, and use # the negative-TTL value. 
return [] + except DNSNotImplementedError: + # For .onion homeservers this is unavailable, just fall back to host:8448 + return [] except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else re-raise diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 3302d4e48a..9094dab0fe 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,7 +17,6 @@ import codecs import logging import random import sys -import typing import urllib.parse from http import HTTPStatus from io import BytesIO, StringIO @@ -30,9 +29,11 @@ from typing import ( Generic, List, Optional, + TextIO, Tuple, TypeVar, Union, + cast, overload, ) @@ -63,7 +64,7 @@ from synapse.api.errors import ( from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http import QuieterFileBodyProducer from synapse.http.client import ( - BlacklistingAgentWrapper, + BlocklistingAgentWrapper, BodyExceededMaxSize, ByteWriteable, _make_scheduler, @@ -183,20 +184,61 @@ class MatrixFederationRequest: return self.json -class JsonParser(ByteParser[Union[JsonDict, list]]): +class _BaseJsonParser(ByteParser[T]): """A parser that buffers the response and tries to parse it as JSON.""" CONTENT_TYPE = "application/json" - def __init__(self) -> None: + def __init__( + self, validator: Optional[Callable[[Optional[object]], bool]] = None + ) -> None: + """ + Args: + validator: A callable which takes the parsed JSON value and returns + true if the value is valid. + """ self._buffer = StringIO() self._binary_wrapper = BinaryIOWrapper(self._buffer) + self._validator = validator def write(self, data: bytes) -> int: return self._binary_wrapper.write(data) - def finish(self) -> Union[JsonDict, list]: - return json_decoder.decode(self._buffer.getvalue()) + def finish(self) -> T: + result = json_decoder.decode(self._buffer.getvalue()) + if self._validator is not None and not self._validator(result): + raise ValueError( + f"Received incorrect JSON value: {result.__class__.__name__}" + ) + return result + + +class JsonParser(_BaseJsonParser[JsonDict]): + """A parser that buffers the response and tries to parse it as a JSON object.""" + + def __init__(self) -> None: + super().__init__(self._validate) + + @staticmethod + def _validate(v: Any) -> bool: + return isinstance(v, dict) + + +class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]): + """Ensure the legacy responses of /send_join & /send_leave are correct.""" + + def __init__(self) -> None: + super().__init__(self._validate) + + @staticmethod + def _validate(v: Any) -> bool: + # Match [integer, JSON dict] + return ( + isinstance(v, list) + and len(v) == 2 + and type(v[0]) == int + and isinstance(v[1], dict) + ) async def _handle_response( @@ -313,9 +355,7 @@ async def _handle_response( class BinaryIOWrapper: """A wrapper for a TextIO which converts from bytes on the fly.""" - def __init__( - self, file: typing.TextIO, encoding: str = "utf-8", errors: str = "strict" - ): + def __init__(self, file: TextIO, encoding: str = "utf-8", errors: str = "strict"): self.decoder = codecs.getincrementaldecoder(encoding)(errors) self.file = file @@ -352,15 +392,15 @@ class MatrixFederationHttpClient: self.reactor, tls_client_options_factory, user_agent.encode("ascii"), - hs.config.server.federation_ip_range_whitelist, - hs.config.server.federation_ip_range_blacklist, + hs.config.server.federation_ip_range_allowlist, + 
hs.config.server.federation_ip_range_blocklist, ) - # Use a BlacklistingAgentWrapper to prevent circumventing the IP - # blacklist via IP literals in server names - self.agent = BlacklistingAgentWrapper( + # Use a BlocklistingAgentWrapper to prevent circumventing the IP + # blocking via IP literals in server names + self.agent = BlocklistingAgentWrapper( federation_agent, - ip_blacklist=hs.config.server.federation_ip_range_blacklist, + ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) self.clock = hs.get_clock() @@ -793,7 +833,7 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, - ) -> Union[JsonDict, list]: + ) -> JsonDict: ... @overload @@ -825,8 +865,8 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser] = None, - ): + parser: Optional[ByteParser[T]] = None, + ) -> Union[JsonDict, T]: """Sends the specified json data using PUT Args: @@ -902,7 +942,7 @@ class MatrixFederationHttpClient: _sec_timeout = self.default_timeout if parser is None: - parser = JsonParser() + parser = cast(ByteParser[T], JsonParser()) body = await _handle_response( self.reactor, @@ -924,7 +964,7 @@ class MatrixFederationHttpClient: timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryParams] = None, - ) -> Union[JsonDict, list]: + ) -> JsonDict: """Sends the specified json data using POST Args: @@ -998,7 +1038,7 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, - ) -> Union[JsonDict, list]: + ) -> JsonDict: ... @overload @@ -1024,8 +1064,8 @@ class MatrixFederationHttpClient: timeout: Optional[int] = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser] = None, - ): + parser: Optional[ByteParser[T]] = None, + ) -> Union[JsonDict, T]: """GETs some json from the given host homeserver and path Args: @@ -1091,7 +1131,7 @@ class MatrixFederationHttpClient: _sec_timeout = self.default_timeout if parser is None: - parser = JsonParser() + parser = cast(ByteParser[T], JsonParser()) body = await _handle_response( self.reactor, @@ -1112,7 +1152,7 @@ class MatrixFederationHttpClient: timeout: Optional[int] = None, ignore_backoff: bool = False, args: Optional[QueryParams] = None, - ) -> Union[JsonDict, list]: + ) -> JsonDict: """Send a DELETE request to the remote expecting some json response Args: diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 94ef737b9e..7bdc4acae7 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -53,7 +53,7 @@ class ProxyAgent(_AgentBase): connections. proxy_reactor: twisted reactor to use for connections to the proxy server - reactor might have some blacklisting applied (i.e. for DNS queries), + reactor might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. contextFactory: A factory for TLS contexts, to control the diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py new file mode 100644 index 0000000000..800f21873d --- /dev/null +++ b/synapse/http/replicationagent.py @@ -0,0 +1,170 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Dict, Optional + +from zope.interface import implementer + +from twisted.internet import defer +from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS +from twisted.internet.interfaces import IStreamClientEndpoint +from twisted.python.failure import Failure +from twisted.web.client import URI, HTTPConnectionPool, _AgentBase +from twisted.web.error import SchemeNotSupported +from twisted.web.http_headers import Headers +from twisted.web.iweb import ( + IAgent, + IAgentEndpointFactory, + IBodyProducer, + IPolicyForHTTPS, + IResponse, +) + +from synapse.config.workers import InstanceLocationConfig +from synapse.types import ISynapseReactor + +logger = logging.getLogger(__name__) + + +@implementer(IAgentEndpointFactory) +class ReplicationEndpointFactory: + """Connect to a given TCP socket""" + + def __init__( + self, + reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], + context_factory: IPolicyForHTTPS, + ) -> None: + self.reactor = reactor + self.instance_map = instance_map + self.context_factory = context_factory + + def endpointForURI(self, uri: URI) -> IStreamClientEndpoint: + """ + This part of the factory decides what kind of endpoint is being connected to. + + Args: + uri: The pre-parsed URI object containing all the uri data + + Returns: The correct client endpoint object + """ + # The given URI has a special scheme and includes the worker name. The + # actual connection details are pulled from the instance map. + worker_name = uri.netloc.decode("utf-8") + scheme = self.instance_map[worker_name].scheme() + + if scheme in ("http", "https"): + endpoint = HostnameEndpoint( + self.reactor, + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ) + if scheme == "https": + endpoint = wrapClientTLS( + # The 'port' argument below isn't actually used by the function + self.context_factory.creatorForNetloc( + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ), + endpoint, + ) + return endpoint + else: + raise SchemeNotSupported(f"Unsupported scheme: {scheme}") + + +@implementer(IAgent) +class ReplicationAgent(_AgentBase): + """ + Client for connecting to replication endpoints via HTTP and HTTPS. + + Much of this code is copied from Twisted's twisted.web.client.Agent. + """ + + def __init__( + self, + reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], + contextFactory: IPolicyForHTTPS, + connectTimeout: Optional[float] = None, + bindAddress: Optional[bytes] = None, + pool: Optional[HTTPConnectionPool] = None, + ): + """ + Create a ReplicationAgent. + + Args: + reactor: A reactor for this Agent to place outgoing connections. + contextFactory: A factory for TLS contexts, to control the + verification parameters of OpenSSL. The default is to use a + BrowserLikePolicyForHTTPS, so unless you have special + requirements you can leave this as-is. + connectTimeout: The amount of time that this Agent will wait + for the peer to accept a connection. + bindAddress: The local address for client sockets to bind to. 
+ pool: An HTTPConnectionPool instance, or None, in which + case a non-persistent HTTPConnectionPool instance will be + created. + """ + _AgentBase.__init__(self, reactor, pool) + endpoint_factory = ReplicationEndpointFactory( + reactor, instance_map, contextFactory + ) + self._endpointFactory = endpoint_factory + + def request( + self, + method: bytes, + uri: bytes, + headers: Optional[Headers] = None, + bodyProducer: Optional[IBodyProducer] = None, + ) -> "defer.Deferred[IResponse]": + """ + Issue a request to the server indicated by the given uri. + + An existing connection from the connection pool may be used or a new + one may be created. + + Currently, HTTP and HTTPS schemes are supported in uri. + + This is copied from twisted.web.client.Agent, except: + + * It uses a different pool key (combining the host & port). + * It does not call _ensureValidURI(...) since it breaks on some + UNIX paths. + + See: twisted.web.iweb.IAgent.request + """ + parsedURI = URI.fromBytes(uri) + try: + endpoint = self._endpointFactory.endpointForURI(parsedURI) + except SchemeNotSupported: + return defer.fail(Failure()) + + # This sets the Pool key to be: + # (http(s), <host:port>) + key = (parsedURI.scheme, parsedURI.netloc) + + # _requestWithEndpoint comes from _AgentBase class + return self._requestWithEndpoint( + key, + endpoint, + method, + parsedURI, + headers, + bodyProducer, + parsedURI.originForm, + ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 7b760505b2..101dc2e747 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -46,6 +46,13 @@ from twisted.internet import defer, interfaces from twisted.internet.defer import CancelledError from twisted.python import failure from twisted.web import resource + +try: + from twisted.web.pages import notFound +except ImportError: + from twisted.web.resource import NoResource as notFound # type: ignore[assignment] + +from twisted.web.resource import IResource from twisted.web.server import NOT_DONE_YET, Request from twisted.web.static import File from twisted.web.util import redirectTo @@ -569,6 +576,9 @@ class StaticResource(File): set_clickjacking_protection_headers(request) return super().render_GET(request) + def directoryListing(self) -> IResource: + return notFound() + class UnrecognizedRequestResource(resource.Resource): """ diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index b81e3c2b0c..e81c987b10 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -93,6 +93,7 @@ class MediaRepository: self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist ) + self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from # List of StorageProviders where we should search for media and # potentially upload to. @@ -276,6 +277,14 @@ class MediaRepository: ): raise FederationDeniedError(server_name) + # Don't let users download media from domains listed in the config, even + # if we might have the media to serve. This is Trust & Safety tooling to + # block some servers' media from being accessible to local users. + # See `prevent_media_downloads_from` config docs for more info. 
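The guard that follows is a plain membership test against the configured list of server names, with no wildcard or pattern matching. A hypothetical sketch of the effect (the domain and config value are illustrative):

prevent_media_downloads_from = ["evil.example.org"]  # illustrative config value

def is_download_blocked(server_name: str) -> bool:
    # Mirrors the check below: exact server-name membership, nothing fancier.
    return server_name in prevent_media_downloads_from

assert is_download_blocked("evil.example.org")  # answered with a 404
assert not is_download_blocked("matrix.org")    # fetched as usual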
+ if server_name in self.prevent_media_downloads_from: + respond_404(request) + return + self.mark_recently_accessed(server_name, media_id) # We linearize here to ensure that we don't try and download remote diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index a7e22a91e1..a819d95407 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -36,7 +36,6 @@ from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender -import synapse from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util import Clock @@ -74,7 +73,7 @@ class MediaStorage: self.local_media_directory = local_media_directory self.filepaths = filepaths self.storage_providers = storage_providers - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.clock = hs.get_clock() async def store_file(self, source: IO, file_info: FileInfo) -> str: @@ -145,10 +144,10 @@ class MediaStorage: f.flush() f.close() - spam_check = await self.spam_checker.check_media_file_for_spam( + spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam( ReadableFileWrapper(self.clock, fname), file_info ) - if spam_check != synapse.module_api.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: logger.info("Blocking media due to spam checker") # Note that we'll delete the stored media, due to the # try/except below. The media also won't be stored in diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index c8a4a809f1..70b32cee17 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -105,7 +105,7 @@ class UrlPreviewer: When Synapse is asked to preview a URL it does the following: - 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the + 1. Checks against a URL blocklist (defined as `url_preview_url_blacklist` in the config). 2. Checks the URL against an in-memory cache and returns the result if it exists. (This is also used to de-duplicate processing of multiple in-flight requests at once.) @@ -113,7 +113,7 @@ class UrlPreviewer: 1. Checks URL and timestamp against the database cache and returns the result if it has not expired and was successful (a 2xx return code). 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it - does, update the URL to download. + does and the new URL is not blocked, update the URL to download. 3. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 4. If the media is an image: @@ -127,14 +127,14 @@ class UrlPreviewer: and saves the local media metadata. 2. Convert the oEmbed response to an Open Graph response. 3. Override any Open Graph data from the HTML with data from oEmbed. - 4. If an image exists in the Open Graph response: + 4. If an image URL exists in the Open Graph response: 1. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 2. Generates thumbnails. 3. Updates the Open Graph response based on image properties. - 6. If the media is JSON and an oEmbed URL was found: + 6. If an oEmbed URL was found and the media is JSON: 1. Convert the oEmbed response to an Open Graph response. - 2. If a thumbnail or image is in the oEmbed response: + 2. 
If an image URL is in the oEmbed response: 1. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 2. Generates thumbnails. @@ -144,7 +144,8 @@ class UrlPreviewer: If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or image thumbnailing, step 5.4 or 6.4) fail then the URL preview as a whole - does not fail. As much information as possible is returned. + does not fail. If any of them are blocked, then those additional requests + are skipped. As much information as possible is returned. The in-memory cache expires after 1 hour. @@ -166,8 +167,8 @@ class UrlPreviewer: self.client = SimpleHttpClient( hs, treq_args={"browser_like_redirects": True}, - ip_whitelist=hs.config.media.url_preview_ip_range_whitelist, - ip_blacklist=hs.config.media.url_preview_ip_range_blacklist, + ip_allowlist=hs.config.media.url_preview_ip_range_allowlist, + ip_blocklist=hs.config.media.url_preview_ip_range_blocklist, use_proxy=True, ) self.media_repo = media_repo @@ -185,7 +186,7 @@ class UrlPreviewer: or instance_running_jobs == hs.get_instance_name() ) - self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist + self.url_preview_url_blocklist = hs.config.media.url_preview_url_blacklist self.url_preview_accept_language = hs.config.media.url_preview_accept_language # memory cache mapping urls to an ObservableDeferred returning @@ -203,48 +204,14 @@ class UrlPreviewer: ) async def preview(self, url: str, user: UserID, ts: int) -> bytes: - # XXX: we could move this into _do_preview if we wanted. - url_tuple = urlsplit(url) - for entry in self.url_preview_url_blacklist: - match = True - for attrib in entry: - pattern = entry[attrib] - value = getattr(url_tuple, attrib) - logger.debug( - "Matching attrib '%s' with value '%s' against pattern '%s'", - attrib, - value, - pattern, - ) - - if value is None: - match = False - continue - - # Some attributes might not be parsed as strings by urlsplit (such as the - # port, which is parsed as an int). Because we use match functions that - # expect strings, we want to make sure that's what we give them. - value_str = str(value) - - if pattern.startswith("^"): - if not re.match(pattern, value_str): - match = False - continue - else: - if not fnmatch.fnmatch(value_str, pattern): - match = False - continue - if match: - logger.warning("URL %s blocked by url_blacklist entry %s", url, entry) - raise SynapseError( - 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN - ) - # the in-memory cache: - # * ensures that only one request is active at a time + # * ensures that only one request to a URL is active at a time # * takes load off the DB for the thundering herds # * also caches any failures (unlike the DB) so we don't keep - # requesting the same endpoint + # requesting the same endpoint + # + # Note that autodiscovered oEmbed URLs and pre-caching of images + # are not captured in the in-memory cache. observable = self._cache.get(url) @@ -283,7 +250,7 @@ class UrlPreviewer: og = og.encode("utf8") return og - # If this URL can be accessed via oEmbed, use that instead. + # If this URL can be accessed via an allowed oEmbed, use that instead. url_to_download = url oembed_url = self._oembed.get_oembed_url(url) if oembed_url: @@ -329,6 +296,7 @@ class UrlPreviewer: # defer to that. oembed_url = self._oembed.autodiscover_from_html(tree) og_from_oembed: JsonDict = {} + # Only download the oEmbed URL if it is allowed. 
if oembed_url: try: oembed_info = await self._handle_url( @@ -411,6 +379,59 @@ class UrlPreviewer: return jsonog.encode("utf8") + def _is_url_blocked(self, url: str) -> bool: + """ + Check whether the URL is allowed to be previewed (according to the homeserver + configuration). + + Args: + url: The requested URL. + + Returns: + True if the URL is blocked, False if it is allowed. + """ + url_tuple = urlsplit(url) + for entry in self.url_preview_url_blocklist: + match = True + # Iterate over each entry. If *all* attributes of that entry match + # the current URL, then reject it. + for attrib, pattern in entry.items(): + value = getattr(url_tuple, attrib) + logger.debug( + "Matching attrib '%s' with value '%s' against pattern '%s'", + attrib, + value, + pattern, + ) + + if value is None: + match = False + break + + # Some attributes might not be parsed as strings by urlsplit (such as the + # port, which is parsed as an int). Because we use match functions that + # expect strings, we want to make sure that's what we give them. + value_str = str(value) + + # Check the value against the pattern as either a regular expression or + # a glob. If it doesn't match, the entry doesn't match. + if pattern.startswith("^"): + if not re.match(pattern, value_str): + match = False + break + else: + if not fnmatch.fnmatch(value_str, pattern): + match = False + break + + # All fields matched, return true (the URL is blocked). + if match: + logger.warning("URL %s blocked by entry %s", url, entry) + return match + + # No matches were found, the URL is allowed. + return False + async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult: """ Fetches a remote URL and parses the headers. @@ -451,7 +472,7 @@ class UrlPreviewer: except DNSLookupError: # DNS lookup returned no results # Note: This will also be the case if one of the resolved IP - # addresses is blacklisted + # addresses is blocked. raise SynapseError( 502, "DNS resolution failure during URL preview generation", @@ -547,8 +568,16 @@ class UrlPreviewer: Returns: A MediaInfo object describing the fetched content. + + Raises: + SynapseError if the URL is blocked. """ + if self._is_url_blocked(url): + raise SynapseError( + 403, "URL blocked by url pattern blocklist entry", Codes.UNKNOWN + ) + # TODO: we should probably honour robots.txt... except in practice # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? @@ -624,7 +653,7 @@ class UrlPreviewer: return # The image URL from the HTML might be relative to the previewed page, - # convert it to an URL which can be requested directly. + # convert it to a URL which can be requested directly. 
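For context on the blocklist the new _is_url_blocked helper walks: each entry is a dict mapping urlsplit attribute names to patterns, and every attribute in an entry must match before the URL is rejected; patterns starting with ^ are treated as regular expressions, anything else as a glob. Some hypothetical entries of that shape (values are illustrative, not taken from the Synapse documentation):

url_preview_url_blocklist = [
    # Glob match on the host part alone.
    {"netloc": "ads.example.com"},
    # Both attributes must match: plain-HTTP URLs on any *.internal host.
    {"scheme": "http", "netloc": "*.internal"},
    # Regex match (note the ^ prefix): any path under /private/.
    {"path": "^/private/"},
]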
url_parts = urlparse(image_url) if url_parts.scheme != "data": image_url = urljoin(media_info.uri, image_url) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 595c23e78d..0e9f366cba 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -44,34 +44,6 @@ from synapse.events.presence_router import ( GET_USERS_FOR_STATES_CALLBACK, PresenceRouter, ) -from synapse.events.spamcheck import ( - CHECK_EVENT_FOR_SPAM_CALLBACK, - CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK, - CHECK_REGISTRATION_FOR_SPAM_CALLBACK, - CHECK_USERNAME_FOR_SPAM_CALLBACK, - SHOULD_DROP_FEDERATED_EVENT_CALLBACK, - USER_MAY_CREATE_ROOM_ALIAS_CALLBACK, - USER_MAY_CREATE_ROOM_CALLBACK, - USER_MAY_INVITE_CALLBACK, - USER_MAY_JOIN_ROOM_CALLBACK, - USER_MAY_PUBLISH_ROOM_CALLBACK, - USER_MAY_SEND_3PID_INVITE_CALLBACK, - SpamChecker, -) -from synapse.events.third_party_rules import ( - CHECK_CAN_DEACTIVATE_USER_CALLBACK, - CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, - CHECK_EVENT_ALLOWED_CALLBACK, - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, - CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, - ON_CREATE_ROOM_CALLBACK, - ON_NEW_EVENT_CALLBACK, - ON_PROFILE_UPDATE_CALLBACK, - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, - ON_THREEPID_BIND_CALLBACK, - ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, -) from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, @@ -105,6 +77,35 @@ from synapse.module_api.callbacks.account_validity_callbacks import ( ON_LEGACY_SEND_MAIL_CALLBACK, ON_USER_REGISTRATION_CALLBACK, ) +from synapse.module_api.callbacks.spamchecker_callbacks import ( + CHECK_EVENT_FOR_SPAM_CALLBACK, + CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK, + CHECK_REGISTRATION_FOR_SPAM_CALLBACK, + CHECK_USERNAME_FOR_SPAM_CALLBACK, + SHOULD_DROP_FEDERATED_EVENT_CALLBACK, + USER_MAY_CREATE_ROOM_ALIAS_CALLBACK, + USER_MAY_CREATE_ROOM_CALLBACK, + USER_MAY_INVITE_CALLBACK, + USER_MAY_JOIN_ROOM_CALLBACK, + USER_MAY_PUBLISH_ROOM_CALLBACK, + USER_MAY_SEND_3PID_INVITE_CALLBACK, + SpamCheckerModuleApiCallbacks, +) +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + CHECK_CAN_DEACTIVATE_USER_CALLBACK, + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, + CHECK_EVENT_ALLOWED_CALLBACK, + CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, + CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, + ON_CREATE_ROOM_CALLBACK, + ON_NEW_EVENT_CALLBACK, + ON_PROFILE_UPDATE_CALLBACK, + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, + ON_THREEPID_BIND_CALLBACK, + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, +) +from synapse.push.httppusher import HttpPusher from synapse.rest.client.login import LoginResponse from synapse.storage import DataStore from synapse.storage.background_updates import ( @@ -133,7 +134,7 @@ from synapse.util.caches.descriptors import CachedFunction, cached as _cached from synapse.util.frozenutils import freeze if TYPE_CHECKING: - from synapse.app.generic_worker import GenericWorkerSlavedStore + from synapse.app.generic_worker import GenericWorkerStore from synapse.server import HomeServer @@ -147,7 +148,7 @@ are loaded into Synapse. 
""" PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS -NOT_SPAM = SpamChecker.NOT_SPAM +NOT_SPAM = SpamCheckerModuleApiCallbacks.NOT_SPAM __all__ = [ "errors", @@ -155,6 +156,7 @@ __all__ = [ "parse_json_object_from_request", "respond_with_html", "run_in_background", + "run_as_background_process", "cached", "NOT_SPAM", "UserID", @@ -235,9 +237,7 @@ class ModuleApi: # TODO: Fix this type hint once the types for the data stores have been ironed # out. - self._store: Union[ - DataStore, "GenericWorkerSlavedStore" - ] = hs.get_datastores().main + self._store: Union[DataStore, "GenericWorkerStore"] = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self._auth = hs.get_auth() self._auth_handler = auth_handler @@ -248,6 +248,7 @@ class ModuleApi: self._registration_handler = hs.get_registration_handler() self._send_email_handler = hs.get_send_email_handler() self._push_rules_handler = hs.get_push_rules_handler() + self._pusherpool = hs.get_pusherpool() self._device_handler = hs.get_device_handler() self.custom_template_dir = hs.config.server.custom_template_directory self._callbacks = hs.get_module_api_callbacks() @@ -271,8 +272,6 @@ class ModuleApi: self._public_room_list_manager = PublicRoomListManager(hs) self._account_data_manager = AccountDataManager(hs) - self._spam_checker = hs.get_spam_checker() - self._third_party_event_rules = hs.get_third_party_event_rules() self._password_auth_provider = hs.get_password_auth_provider() self._presence_router = hs.get_presence_router() self._account_data_handler = hs.get_account_data_handler() @@ -305,7 +304,7 @@ class ModuleApi: Added in Synapse v1.37.0. """ - return self._spam_checker.register_callbacks( + return self._callbacks.spam_checker.register_callbacks( check_event_for_spam=check_event_for_spam, should_drop_federated_event=should_drop_federated_event, user_may_join_room=user_may_join_room, @@ -370,7 +369,7 @@ class ModuleApi: Added in Synapse v1.39.0. """ - return self._third_party_event_rules.register_third_party_rules_callbacks( + return self._callbacks.third_party_event_rules.register_third_party_rules_callbacks( check_event_allowed=check_event_allowed, on_create_room=on_create_room, check_threepid_can_be_invited=check_threepid_can_be_invited, @@ -1226,6 +1225,50 @@ class ModuleApi: await self._clock.sleep(seconds) + async def send_http_push_notification( + self, + user_id: str, + device_id: Optional[str], + content: JsonDict, + tweaks: Optional[JsonMapping] = None, + default_payload: Optional[JsonMapping] = None, + ) -> Dict[str, bool]: + """Send an HTTP push notification that is forwarded to the registered push gateway + for the specified user/device. + + Added in Synapse v1.82.0. + + Args: + user_id: The user ID to send the push notification to. + device_id: The device ID of the device where to send the push notification. If `None`, + the notification will be sent to all registered HTTP pushers of the user. + content: A dict of values that will be put in the `notification` field of the push + (cf Push Gateway spec). `devices` field will be overrided if included. + tweaks: A dict of `tweaks` that will be inserted in the `devices` section, cf spec. + default_payload: default payload to add in `devices[0].data.default_payload`. + This will be merged (and override if some matching values already exist there) + with existing `default_payload`. 
+ + Returns: + a dict representing the status of the push per device ID + """ + status = {} + if user_id in self._pusherpool.pushers: + for p in self._pusherpool.pushers[user_id].values(): + if isinstance(p, HttpPusher) and ( + not device_id or p.device_id == device_id + ): + res = await p.dispatch_push(content, tweaks, default_payload) + # Check if the push was successful and no push keys were rejected. + sent = res is not False and not res + + # This is mainly to accommodate mypy + # device_id should never be empty after the `set_device_id_for_pushers` + # background job has been properly run. + if p.device_id: + status[p.device_id] = sent + return status + async def send_mail( self, recipient: str, diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py index 3d977bf655..dcb036552b 100644 --- a/synapse/module_api/callbacks/__init__.py +++ b/synapse/module_api/callbacks/__init__.py @@ -12,11 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from synapse.server import HomeServer + from synapse.module_api.callbacks.account_validity_callbacks import ( AccountValidityModuleApiCallbacks, ) +from synapse.module_api.callbacks.spamchecker_callbacks import ( + SpamCheckerModuleApiCallbacks, +) +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + ThirdPartyEventRulesModuleApiCallbacks, +) class ModuleApiCallbacks: - def __init__(self) -> None: + def __init__(self, hs: "HomeServer") -> None: self.account_validity = AccountValidityModuleApiCallbacks() + self.spam_checker = SpamCheckerModuleApiCallbacks(hs) + self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks(hs) diff --git a/synapse/events/spamcheck.py b/synapse/module_api/callbacks/spamchecker_callbacks.py index 765c15bb51..4456d1b81e 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -286,11 +286,10 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: api.register_spam_checker_callbacks(**hooks) -class SpamChecker: +class SpamCheckerModuleApiCallbacks: NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM" def __init__(self, hs: "synapse.server.HomeServer") -> None: - self.hs = hs self.clock = hs.get_clock() self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = [] diff --git a/synapse/events/third_party_rules.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 61d4530be7..911f37ba42 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -140,7 +140,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: api.register_third_party_rules_callbacks(**hooks) -class ThirdPartyEventRules: +class ThirdPartyEventRulesModuleApiCallbacks: """Allows server admins to provide a Python module implementing an extra set of rules to apply when processing events. 
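The practical upshot of this move is that spam-checker and third-party-rules callbacks now hang off the shared ModuleApiCallbacks object instead of being standalone HomeServer attributes. A rough sketch of the new internal access pattern, modelled on the call sites in this diff (filter_spammy_users is a hypothetical helper, not part of the patch):

from typing import TYPE_CHECKING, List

if TYPE_CHECKING:
    from synapse.server import HomeServer


async def filter_spammy_users(hs: "HomeServer", users: List[str]) -> List[str]:
    # Previously: hs.get_spam_checker(); the callbacks now live here.
    spam_checker = hs.get_module_api_callbacks().spam_checker
    return [
        u for u in users if not await spam_checker.check_username_for_spam(u)
    ]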
@@ -149,8 +149,6 @@ class ThirdPartyEventRules: """ def __init__(self, hs: "HomeServer"): - self.third_party_rules = None - self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() diff --git a/synapse/notifier.py b/synapse/notifier.py index a8832a3f8e..897272ad5b 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -232,7 +232,7 @@ class Notifier: self._federation_client = hs.get_federation_http_client() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 199337673f..320084f5f5 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -326,6 +326,7 @@ class BulkPushRuleEvaluator: if ( not event.internal_metadata.is_notifiable() or event.internal_metadata.is_historical() + or event.room_id in self.hs.config.server.rooms_to_exclude_from_sync ): # Push rules for events that aren't notifiable can't be processed by this and # we want to skip push notification actions for historical messages diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 222afbdcc8..88b52c26a0 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -22,7 +22,7 @@ from synapse.types import UserID def format_push_rules_for_user( user: UserID, ruleslist: FilteredPushRules -) -> Dict[str, Dict[str, list]]: +) -> Dict[str, Dict[str, List[Dict[str, Any]]]]: """Converts a list of rawrules and an enabled map into nested dictionaries to match the Matrix client-server format for push rules""" diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index b048b03a74..50027680cb 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -13,8 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import random import urllib.parse -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Union from prometheus_client import Counter @@ -27,6 +28,7 @@ from synapse.logging import opentracing from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import Pusher, PusherConfig, PusherConfigException from synapse.storage.databases.main.event_push_actions import HttpPushAction +from synapse.types import JsonDict, JsonMapping from . import push_tools @@ -56,7 +58,7 @@ http_badges_failed_counter = Counter( ) -def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]: +def tweaks_for_actions(actions: List[Union[str, Dict]]) -> JsonMapping: """ Converts a list of actions into a `tweaks` dict (which can then be passed to the push gateway). 
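To make the new JsonMapping return type concrete: for a typical rule's actions the conversion yields a flat dict, with a set_tweak that carries no value defaulting to True (the long-standing behaviour; the action list below is illustrative):

actions = [
    "notify",                                    # carries no tweak; ignored here
    {"set_tweak": "sound", "value": "default"},
    {"set_tweak": "highlight"},                  # no explicit value
]
tweaks = tweaks_for_actions(actions)
# -> {"sound": "default", "highlight": True}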
@@ -101,6 +103,7 @@ class HttpPusher(Pusher): self._storage_controllers = self.hs.get_storage_controllers() self.app_display_name = pusher_config.app_display_name self.device_display_name = pusher_config.device_display_name + self.device_id = pusher_config.device_id self.pushkey_ts = pusher_config.ts self.data = pusher_config.data self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC @@ -112,6 +115,8 @@ class HttpPusher(Pusher): ) self._pusherpool = hs.get_pusherpool() + self.push_jitter_delay_ms = hs.config.push.push_jitter_delay_ms + self.data = pusher_config.data if self.data is None: raise PusherConfigException("'data' key can not be null for HTTP pusher") @@ -138,7 +143,7 @@ class HttpPusher(Pusher): ) self.url = url - self.http_client = hs.get_proxied_blacklisted_http_client() + self.http_client = hs.get_proxied_blocklisted_http_client() self.data_minus_url = {} self.data_minus_url.update(self.data) del self.data_minus_url["url"] @@ -324,7 +329,22 @@ class HttpPusher(Pusher): event = await self.store.get_event(push_action.event_id, allow_none=True) if event is None: return True # It's been redacted + + # Check if we should delay sending out the notification by a random + # amount. + # + # Note: we base the delay off of when the event was sent, rather than + # now, to handle the case where we need to send out many notifications + # at once. If we just slept the random amount each loop then the last + # push notification in the set could be delayed by many times the max + # delay. + if self.push_jitter_delay_ms: + delay_ms = random.randint(1, self.push_jitter_delay_ms) + diff_ms = event.origin_server_ts + delay_ms - self.clock.time_msec() + if diff_ms > 0: + await self.clock.sleep(diff_ms / 1000) + + rejected = await self.dispatch_push_event(event, tweaks, badge) if rejected is False: return False @@ -342,9 +362,83 @@ class HttpPusher(Pusher): await self._pusherpool.remove_pusher(self.app_id, pk, self.user_id) return True - async def _build_notification_dict( - self, event: EventBase, tweaks: Dict[str, bool], badge: int - ) -> Dict[str, Any]: + async def dispatch_push( + self, + content: JsonDict, + tweaks: Optional[JsonMapping] = None, + default_payload: Optional[JsonMapping] = None, + ) -> Union[bool, List[str]]: + """Send a notification to the registered push gateway, with `content` being + the content of the top-level `notification` property specified in the spec. + Note that the `devices` property will be added with device-specific + information for this pusher. + + Args: + content: the content + tweaks: tweaks to add into the `devices` section + default_payload: default payload to add in `devices[0].data.default_payload`. + This will be merged (and override if some matching values already exist there) + with existing `default_payload`. + + Returns: + False if an error occurred when calling the push gateway, or an array of + rejected push keys otherwise. If this array is empty, the push fully + succeeded. 
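The tri-state return value described here is easiest to read from the caller's side; a hedged sketch (push_and_prune is a hypothetical caller, not part of the patch):

async def push_and_prune(pusher: "HttpPusher", content: "JsonDict") -> None:
    res = await pusher.dispatch_push(content)
    if res is False:
        return            # the gateway could not be reached or errored
    for push_key in res:  # non-empty list: the gateway rejected these push keys
        ...               # e.g. remove the matching pusher
    # an empty list means the push fully succeeded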
+ """ + content = content.copy() + + data = self.data_minus_url.copy() + if default_payload: + data.setdefault("default_payload", {}).update(default_payload) + + device = { + "app_id": self.app_id, + "pushkey": self.pushkey, + "pushkey_ts": int(self.pushkey_ts / 1000), + "data": data, + } + if tweaks: + device["tweaks"] = tweaks + + content["devices"] = [device] + + try: + resp = await self.http_client.post_json_get_json( + self.url, {"notification": content} + ) + except Exception as e: + logger.warning( + "Failed to push data to %s: %s %s", + self.name, + type(e), + e, + ) + return False + rejected = [] + if "rejected" in resp: + rejected = resp["rejected"] + return rejected + + async def dispatch_push_event( + self, + event: EventBase, + tweaks: JsonMapping, + badge: int, + ) -> Union[bool, List[str]]: + """Send a notification to the registered push gateway by building it + from an event. + + Args: + event: the event + tweaks: tweaks to add into the `devices` section, used to decide the + push priority + badge: unread count to send with the push notification + + Returns: + False if an error occured when calling the push gateway, or an array of + rejected push keys otherwise. If this array is empty, the push fully + succeeded. + """ priority = "low" if ( event.type == EventTypes.Encrypted @@ -358,30 +452,20 @@ class HttpPusher(Pusher): # This was checked in the __init__, but mypy doesn't seem to know that. assert self.data is not None if self.data.get("format") == "event_id_only": - d: Dict[str, Any] = { - "notification": { - "event_id": event.event_id, - "room_id": event.room_id, - "counts": {"unread": badge}, - "prio": priority, - "devices": [ - { - "app_id": self.app_id, - "pushkey": self.pushkey, - "pushkey_ts": int(self.pushkey_ts / 1000), - "data": self.data_minus_url, - } - ], - } + content: JsonDict = { + "event_id": event.event_id, + "room_id": event.room_id, + "counts": {"unread": badge}, + "prio": priority, } - return d - - ctx = await push_tools.get_context_for_event( - self._storage_controllers, event, self.user_id - ) + # event_id_only doesn't include the tweaks, so override them. + tweaks = {} + else: + ctx = await push_tools.get_context_for_event( + self._storage_controllers, event, self.user_id + ) - d = { - "notification": { + content = { "id": event.event_id, # deprecated: remove soon "event_id": event.event_id, "room_id": event.room_id, @@ -392,57 +476,27 @@ class HttpPusher(Pusher): "unread": badge, # 'missed_calls': 2 }, - "devices": [ - { - "app_id": self.app_id, - "pushkey": self.pushkey, - "pushkey_ts": int(self.pushkey_ts / 1000), - "data": self.data_minus_url, - "tweaks": tweaks, - } - ], } - } - if event.type == "m.room.member" and event.is_state(): - d["notification"]["membership"] = event.content["membership"] - d["notification"]["user_is_target"] = event.state_key == self.user_id - if self.hs.config.push.push_include_content and event.content: - d["notification"]["content"] = event.content - - # We no longer send aliases separately, instead, we send the human - # readable name of the room, which may be an alias. 
- if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0: - d["notification"]["sender_display_name"] = ctx["sender_display_name"] - if "name" in ctx and len(ctx["name"]) > 0: - d["notification"]["room_name"] = ctx["name"] - - return d - - async def dispatch_push( - self, event: EventBase, tweaks: Dict[str, bool], badge: int - ) -> Union[bool, Iterable[str]]: - notification_dict = await self._build_notification_dict(event, tweaks, badge) - if not notification_dict: - return [] - try: - resp = await self.http_client.post_json_get_json( - self.url, notification_dict - ) - except Exception as e: - logger.warning( - "Failed to push event %s to %s: %s %s", - event.event_id, - self.name, - type(e), - e, - ) - return False - rejected = [] - if "rejected" in resp: - rejected = resp["rejected"] - if not rejected: + if event.type == "m.room.member" and event.is_state(): + content["membership"] = event.content["membership"] + content["user_is_target"] = event.state_key == self.user_id + if self.hs.config.push.push_include_content and event.content: + content["content"] = event.content + + # We no longer send aliases separately, instead, we send the human + # readable name of the room, which may be an alias. + if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0: + content["sender_display_name"] = ctx["sender_display_name"] + if "name" in ctx and len(ctx["name"]) > 0: + content["room_name"] = ctx["name"] + + res = await self.dispatch_push(content, tweaks) + + # If the push is successful and none are rejected, update the badge count. + if res is not False and not res: self.badge_count_last_call = badge - return rejected + + return res async def _send_badge(self, badge: int) -> None: """ diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 8c2c54c07a..63cf24a14d 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -25,6 +25,7 @@ from twisted.internet.error import ConnectError, DNSLookupError from twisted.web.server import Request from synapse.api.errors import HttpResponseException, SynapseError +from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME from synapse.http import RequestTimedOutError from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request @@ -194,14 +195,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): the `instance_map` config). """ clock = hs.get_clock() - client = hs.get_simple_http_client() + client = hs.get_replication_client() local_instance_name = hs.get_instance_name() - # The value of these option should match the replication listener settings - master_host = hs.config.worker.worker_replication_host - master_port = hs.config.worker.worker_replication_http_port - master_tls = hs.config.worker.worker_replication_http_tls - instance_map = hs.config.worker.instance_map outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME) @@ -213,7 +209,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): ) @trace_with_opname("outgoing_replication_request") - async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: + async def send_request( + *, instance_name: str = MAIN_PROCESS_INSTANCE_NAME, **kwargs: Any + ) -> Any: # We have to pull these out here to avoid circular dependencies... 
            streams = hs.get_replication_command_handler().get_streams_to_replicate()
            replication = hs.get_replication_data_handler()
@@ -221,15 +219,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
             with outgoing_gauge.track_inprogress():
                 if instance_name == local_instance_name:
                     raise Exception("Trying to send HTTP request to self")
-                if instance_name == "master":
-                    host = master_host
-                    port = master_port
-                    tls = master_tls
-                elif instance_name in instance_map:
-                    host = instance_map[instance_name].host
-                    port = instance_map[instance_name].port
-                    tls = instance_map[instance_name].tls
-                else:
+                if instance_name not in instance_map:
                     raise Exception(
                         "Instance %r not in 'instance_map' config" % (instance_name,)
                     )
@@ -277,13 +267,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
                         "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
                     )
 
-                # Here the protocol is hard coded to be http by default or https in case the replication
-                # port is set to have tls true.
-                scheme = "https" if tls else "http"
-                uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
-                    scheme,
-                    host,
-                    port,
+                # Hard-code a special scheme to show this is only used for replication. The
+                # instance_name will be passed into the ReplicationEndpointFactory to
+                # determine connection details from the instance_map.
+                uri = "synapse-replication://%s/_synapse/replication/%s/%s" % (
+                    instance_name,
                     cls.NAME,
                     "/".join(url_args),
                 )
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index cc3929dcf5..f874f072f9 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -28,62 +28,6 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
-    """Ask master to resync the device list for a user by contacting their
-    server.
-
-    This must happen on master so that the results can be correctly cached in
-    the database and streamed to workers.
-
-    Request format:
-
-        POST /_synapse/replication/user_device_resync/:user_id
-
-        {}
-
-    Response is equivalent to ` /_matrix/federation/v1/user/devices/:user_id`
-    response, e.g.:
-
-        {
-            "user_id": "@alice:example.org",
-            "devices": [
-                {
-                    "device_id": "JLAFKJWSCS",
-                    "keys": { ... },
-                    "device_display_name": "Alice's Mobile Phone"
-                }
-            ]
-        }
-    """
-
-    NAME = "user_device_resync"
-    PATH_ARGS = ("user_id",)
-    CACHE = False
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
-
-        from synapse.handlers.device import DeviceHandler
-
-        handler = hs.get_device_handler()
-        assert isinstance(handler, DeviceHandler)
-        self.device_list_updater = handler.device_list_updater
-
-        self.store = hs.get_datastores().main
-        self.clock = hs.get_clock()
-
-    @staticmethod
-    async def _serialize_payload(user_id: str) -> JsonDict:  # type: ignore[override]
-        return {}
-
-    async def _handle_request(  # type: ignore[override]
-        self, request: Request, content: JsonDict, user_id: str
-    ) -> Tuple[int, Optional[JsonDict]]:
-        user_devices = await self.device_list_updater.user_device_resync(user_id)
-
-        return 200, user_devices
-
-
 class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
     """Ask master to resync the device list for multiple users from the same
     remote server by contacting their server.
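With the single-user `user_device_resync` endpoint removed above, resyncing one user's device list is expected to go through the multi-user servlet that remains. A minimal caller-side sketch; the `make_client` wiring follows `ReplicationEndpoint`'s usual pattern, and the `user_ids` payload and return shapes are assumptions based on the multi-user servlet:

    # Hypothetical sketch: resync a single user's device list via the
    # remaining multi-user replication endpoint (payload shape assumed).
    resync_client = ReplicationMultiUserDevicesResyncRestServlet.make_client(hs)
    multi_user_devices = await resync_client(user_ids=["@alice:example.org"])
    user_devices = multi_user_devices.get("@alice:example.org")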
@@ -216,6 +160,5 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - ReplicationUserDevicesResyncRestServlet(hs).register(http_server) ReplicationMultiUserDevicesResyncRestServlet(hs).register(http_server) ReplicationUploadKeysForUserRestServlet(hs).register(http_server) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 200f667fdf..139f57cf86 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -60,7 +60,7 @@ _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5 class ReplicationDataHandler: """Handles incoming stream updates from replication. - This instance notifies the slave data store about updates. Can be subclassed + This instance notifies the data store about updates. Can be subclassed to handle updates in additional ways. """ @@ -91,7 +91,7 @@ class ReplicationDataHandler: ) -> None: """Called to handle a batch of replication data with a given stream token. - By default this just pokes the slave store. Can be overridden in subclasses to + By default, this just pokes the data store. Can be overridden in subclasses to handle more. Args: diff --git a/synapse/replication/tcp/context.py b/synapse/replication/tcp/context.py new file mode 100644 index 0000000000..4688b2200b --- /dev/null +++ b/synapse/replication/tcp/context.py @@ -0,0 +1,34 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
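The new `synapse/replication/tcp/context.py` module below supplies the TLS context factory for Redis connections; a minimal usage sketch mirroring the `connectSSL` call sites added elsewhere in this commit (`handler.py` and `redis.py`), with names taken from those call sites:

    # Sketch of how the factory defined below is wired up at the connect
    # call sites in this commit.
    ssl_context_factory = ClientContextFactory(hs.config.redis)
    reactor.connectSSL(
        redis_config.redis_host,
        redis_config.redis_port,
        factory,
        ssl_context_factory,
        timeout=30,
        bindAddress=None,
    )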
+from OpenSSL.SSL import Context +from twisted.internet import ssl + +from synapse.config.redis import RedisConfig + + +class ClientContextFactory(ssl.ClientContextFactory): + def __init__(self, redis_config: RedisConfig): + self.redis_config = redis_config + + def getContext(self) -> Context: + ctx = super().getContext() + if self.redis_config.redis_certificate: + ctx.use_certificate_file(self.redis_config.redis_certificate) + if self.redis_config.redis_private_key: + ctx.use_privatekey_file(self.redis_config.redis_private_key) + if self.redis_config.redis_ca_file: + ctx.load_verify_locations(cafile=self.redis_config.redis_ca_file) + elif self.redis_config.redis_ca_path: + ctx.load_verify_locations(capath=self.redis_config.redis_ca_path) + return ctx diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 2290b3e6fe..233ad61d49 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -46,6 +46,7 @@ from synapse.replication.tcp.commands import ( UserIpCommand, UserSyncCommand, ) +from synapse.replication.tcp.context import ClientContextFactory from synapse.replication.tcp.protocol import IReplicationConnection from synapse.replication.tcp.streams import ( STREAMS_MAP, @@ -348,13 +349,27 @@ class ReplicationCommandHandler: outbound_redis_connection, channel_names=self._channels_to_subscribe_to, ) - hs.get_reactor().connectTCP( - hs.config.redis.redis_host, - hs.config.redis.redis_port, - self._factory, - timeout=30, - bindAddress=None, - ) + + reactor = hs.get_reactor() + redis_config = hs.config.redis + if hs.config.redis.redis_use_tls: + ssl_context_factory = ClientContextFactory(hs.config.redis) + reactor.connectSSL( + redis_config.redis_host, + redis_config.redis_port, + self._factory, + ssl_context_factory, + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + redis_config.redis_host, + redis_config.redis_port, + self._factory, + timeout=30, + bindAddress=None, + ) def get_streams(self) -> Dict[str, Stream]: """Get a map from stream name to all streams.""" diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index dfc061eb5e..c8f4bf8b27 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -35,6 +35,7 @@ from synapse.replication.tcp.commands import ( ReplicateCommand, parse_command_from_line, ) +from synapse.replication.tcp.context import ClientContextFactory from synapse.replication.tcp.protocol import ( IReplicationConnection, tcp_inbound_commands_counter, @@ -386,12 +387,24 @@ def lazyConnection( factory.continueTrying = reconnect reactor = hs.get_reactor() - reactor.connectTCP( - host, - port, - factory, - timeout=30, - bindAddress=None, - ) + + if hs.config.redis.redis_use_tls: + ssl_context_factory = ClientContextFactory(hs.config.redis) + reactor.connectSSL( + host, + port, + factory, + ssl_context_factory, + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + host, + port, + factory, + timeout=30, + bindAddress=None, + ) return factory.handler diff --git a/synapse/res/providers.json b/synapse/res/providers.json index 7b9958e454..2dc9fec8e3 100644 --- a/synapse/res/providers.json +++ b/synapse/res/providers.json @@ -11,5 +11,18 @@ "url": "https://publish.twitter.com/oembed" } ] + }, + { + "provider_name": "YouTube Shorts", + "provider_url": "http://www.youtube.com/", + "endpoints": [ + { + "schemes": [ + "https://youtube.com/shorts/*", + "https://*.youtube.com/shorts/*" + ], + "url": "https://www.youtube.com/oembed" + 
} + ] } ] diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html index f00992a24b..b80e5e8f24 100644 --- a/synapse/res/templates/recaptcha.html +++ b/synapse/res/templates/recaptcha.html @@ -3,7 +3,11 @@ {% block header %} <script src="https://www.recaptcha.net/recaptcha/api.js" async defer></script> -<link rel="stylesheet" href="/_matrix/static/client/register/style.css"> +<style type="text/css"> + .g-recaptcha div { + margin: auto; + } +</style> <script> function captchaDone() { document.getElementById('registrationForm').submit(); diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html index ee4e5295e7..179e994279 100644 --- a/synapse/res/templates/registration_token.html +++ b/synapse/res/templates/registration_token.html @@ -1,12 +1,8 @@ {% extends "_base.html" %} {% block title %}Authentication{% endblock %} -{% block header %} -<link rel="stylesheet" href="/_matrix/static/client/register/style.css"> -{% endblock %} - {% block body %} -<form id="registrationForm" method="post" action="{{ myurl }}"> +<form method="post" action="{{ myurl }}"> <div> {% if error is defined %} <p class="error"><strong>Error: {{ error }}</strong></p> diff --git a/synapse/res/templates/style.css b/synapse/res/templates/style.css index 097b235ae5..9899238bb6 100644 --- a/synapse/res/templates/style.css +++ b/synapse/res/templates/style.css @@ -27,3 +27,7 @@ body { h3 { font-size: .85rem; } h4 { font-size: .8rem; } } + +.error { + color: red; +} diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html index ffabebdd8b..66c40a7000 100644 --- a/synapse/res/templates/terms.html +++ b/synapse/res/templates/terms.html @@ -2,7 +2,12 @@ {% block title %}Authentication{% endblock %} {% block header %} -<link rel="stylesheet" href="/_matrix/static/client/register/style.css"> +<style type="text/css"> + #registrationForm input { + display: block; + margin: auto; + } +</style> {% endblock %} {% block body %} diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 1d7c11b42d..1af8d99d20 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -100,8 +100,7 @@ class ClientRestResource(JsonResource): login.register_servlets(hs, client_resource) profile.register_servlets(hs, client_resource) presence.register_servlets(hs, client_resource) - if is_main_process: - directory.register_servlets(hs, client_resource) + directory.register_servlets(hs, client_resource) voip.register_servlets(hs, client_resource) if is_main_process: pusher.register_servlets(hs, client_resource) @@ -134,8 +133,8 @@ class ClientRestResource(JsonResource): if is_main_process: room_upgrade_rest_servlet.register_servlets(hs, client_resource) room_batch.register_servlets(hs, client_resource) + capabilities.register_servlets(hs, client_resource) if is_main_process: - capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) relations.register_servlets(hs, client_resource) password_policy.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 79f22a59f1..c729364839 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -39,6 +39,7 @@ from synapse.rest.admin.event_reports import ( EventReportDetailRestServlet, EventReportsRestServlet, ) +from synapse.rest.admin.experimental_features import ExperimentalFeaturesRestServlet from synapse.rest.admin.federation import ( 
DestinationMembershipRestServlet, DestinationResetConnectionRestServlet, @@ -68,7 +69,10 @@ from synapse.rest.admin.rooms import ( RoomTimestampToEventRestServlet, ) from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet -from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet +from synapse.rest.admin.statistics import ( + LargestRoomsStatistics, + UserMediaStatisticsRestServlet, +) from synapse.rest.admin.username_available import UsernameAvailableRestServlet from synapse.rest.admin.users import ( AccountDataRestServlet, @@ -259,6 +263,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) UserMediaStatisticsRestServlet(hs).register(http_server) + LargestRoomsStatistics(hs).register(http_server) EventReportDetailRestServlet(hs).register(http_server) EventReportsRestServlet(hs).register(http_server) AccountDataRestServlet(hs).register(http_server) @@ -288,6 +293,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: BackgroundUpdateEnabledRestServlet(hs).register(http_server) BackgroundUpdateRestServlet(hs).register(http_server) BackgroundUpdateStartJobRestServlet(hs).register(http_server) + ExperimentalFeaturesRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource( diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 3b2f2d9abb..11ebed9bfd 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -137,6 +137,35 @@ class DevicesRestServlet(RestServlet): devices = await self.device_handler.get_devices_by_user(target_user.to_string()) return HTTPStatus.OK, {"devices": devices, "total": len(devices)} + async def on_POST( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + """Creates a new device for the user.""" + await assert_requester_is_admin(self.auth, request) + + target_user = UserID.from_string(user_id) + if not self.is_mine(target_user): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Can only create devices for local users" + ) + + u = await self.store.get_user_by_id(target_user.to_string()) + if u is None: + raise NotFoundError("Unknown user") + + body = parse_json_object_from_request(request) + device_id = body.get("device_id") + if not device_id: + raise SynapseError(HTTPStatus.BAD_REQUEST, "Missing device_id") + if not isinstance(device_id, str): + raise SynapseError(HTTPStatus.BAD_REQUEST, "device_id must be a string") + + await self.device_handler.check_device_registered( + user_id=user_id, device_id=device_id + ) + + return HTTPStatus.CREATED, {} + class DeleteDevicesRestServlet(RestServlet): """ diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py new file mode 100644 index 0000000000..abf273af10 --- /dev/null +++ b/synapse/rest/admin/experimental_features.py @@ -0,0 +1,118 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from enum import Enum
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Dict, Tuple
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin import admin_patterns, assert_requester_is_admin
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class ExperimentalFeature(str, Enum):
+    """
+    Currently supported per-user features
+    """
+
+    MSC3026 = "msc3026"
+    MSC3881 = "msc3881"
+    MSC3967 = "msc3967"
+
+
+class ExperimentalFeaturesRestServlet(RestServlet):
+    """
+    Enable or disable experimental features for a user, or determine which
+    features are enabled for a given user
+    """
+
+    PATTERNS = admin_patterns("/experimental_features/(?P<user_id>[^/]*)")
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastores().main
+        self.is_mine = hs.is_mine
+
+    async def on_GET(
+        self,
+        request: SynapseRequest,
+        user_id: str,
+    ) -> Tuple[int, JsonDict]:
+        """
+        List which features are enabled for a given user
+        """
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(user_id)
+        if not self.is_mine(target_user):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "User must be local to check what experimental features are enabled.",
+            )
+
+        enabled_features = await self.store.list_enabled_features(user_id)
+
+        user_features = {}
+        for feature in ExperimentalFeature:
+            if feature in enabled_features:
+                user_features[feature] = True
+            else:
+                user_features[feature] = False
+        return HTTPStatus.OK, {"features": user_features}
+
+    async def on_PUT(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[HTTPStatus, Dict]:
+        """
+        Enable or disable the provided features for the given user
+        """
+        await assert_requester_is_admin(self.auth, request)
+
+        body = parse_json_object_from_request(request)
+
+        target_user = UserID.from_string(user_id)
+        if not self.is_mine(target_user):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "User must be local to enable experimental features.",
+            )
+
+        features = body.get("features")
+        if not features:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST, "You must provide features to set."
+ ) + + # validate the provided features + validated_features = {} + for feature, enabled in features.items(): + try: + validated_feature = ExperimentalFeature(feature) + validated_features[validated_feature] = enabled + except ValueError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + f"{feature!r} is not recognised as a valid experimental feature.", + ) + + await self.store.set_features_for_user(user_id, validated_features) + + return HTTPStatus.OK, {} diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index c134ccfb3d..b7637dff0b 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -258,7 +258,7 @@ class DeleteMediaByID(RestServlet): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() - self.server_name = hs.hostname + self._is_mine_server_name = hs.is_mine_server_name self.media_repository = hs.get_media_repository() async def on_DELETE( @@ -266,7 +266,7 @@ class DeleteMediaByID(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - if self.server_name != server_name: + if not self._is_mine_server_name(server_name): raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media") if await self.store.get_local_media(media_id) is None: diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 4de56bf13f..1d65560265 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -70,7 +70,7 @@ class RoomRestV2Servlet(RestServlet): self._auth = hs.get_auth() self._store = hs.get_datastores().main self._pagination_handler = hs.get_pagination_handler() - self._third_party_rules = hs.get_third_party_event_rules() + self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def on_DELETE( self, request: SynapseRequest, room_id: str diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 9c45f4650d..19780e4b4c 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -113,3 +113,28 @@ class UserMediaStatisticsRestServlet(RestServlet): ret["next_token"] = start + len(users_media) return HTTPStatus.OK, ret + + +class LargestRoomsStatistics(RestServlet): + """Get the largest rooms by database size. + + Only works when using PostgreSQL. + """ + + PATTERNS = admin_patterns("/statistics/database/rooms$") + + def __init__(self, hs: "HomeServer"): + self.auth = hs.get_auth() + self.stats_controller = hs.get_storage_controllers().stats + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self.auth, request) + + room_sizes = await self.stats_controller.get_room_db_size_estimate() + + return HTTPStatus.OK, { + "rooms": [ + {"room_id": room_id, "estimated_size": size} + for room_id, size in room_sizes + ] + } diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 331f225116..932333ae57 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -336,7 +336,7 @@ class UserRestServletV2(RestServlet): HTTPStatus.CONFLICT, "External id is already in use." 
) - if "avatar_url" in body and isinstance(body["avatar_url"], str): + if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True ) diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 43193ad086..b1f9e9dc9b 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -13,8 +13,9 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Optional, Tuple +from synapse.api.constants import AccountDataTypes, ReceiptTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -29,6 +30,23 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +def _check_can_set_account_data_type(account_data_type: str) -> None: + """The fully read marker and push rules cannot be directly set via /account_data.""" + if account_data_type == ReceiptTypes.FULLY_READ: + raise SynapseError( + 405, + "Cannot set m.fully_read through this API." + " Use /rooms/!roomId:server.name/read_markers", + Codes.BAD_JSON, + ) + elif account_data_type == AccountDataTypes.PUSH_RULES: + raise SynapseError( + 405, + "Cannot set m.push_rules through this API. Use /pushrules", + Codes.BAD_JSON, + ) + + class AccountDataServlet(RestServlet): """ PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1 @@ -46,6 +64,7 @@ class AccountDataServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main self.handler = hs.get_account_data_handler() + self._push_rules_handler = hs.get_push_rules_handler() async def on_PUT( self, request: SynapseRequest, user_id: str, account_data_type: str @@ -54,6 +73,10 @@ class AccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add account data for other users.") + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + body = parse_json_object_from_request(request) # If experimental support for MSC3391 is enabled, then providing an empty dict @@ -77,19 +100,28 @@ class AccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") - event = await self.store.get_global_account_data_by_type_for_user( - user_id, account_data_type - ) + # Push rules are stored in a separate table and must be queried separately. + if ( + self._hs.config.experimental.msc4010_push_rules_account_data + and account_data_type == AccountDataTypes.PUSH_RULES + ): + account_data: Optional[ + JsonDict + ] = await self._push_rules_handler.push_rules_for_user(requester.user) + else: + account_data = await self.store.get_global_account_data_by_type_for_user( + user_id, account_data_type + ) - if event is None: + if account_data is None: raise NotFoundError("Account data not found") # If experimental support for MSC3391 is enabled, then this endpoint should # return a 404 if the content for an account data type is an empty dict. 
- if self._hs.config.experimental.msc3391_enabled and event == {}: + if self._hs.config.experimental.msc3391_enabled and account_data == {}: raise NotFoundError("Account data not found") - return 200, event + return 200, account_data class UnstableAccountDataServlet(RestServlet): @@ -108,6 +140,7 @@ class UnstableAccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() + self._hs = hs self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() @@ -121,6 +154,10 @@ class UnstableAccountDataServlet(RestServlet): if user_id != requester.user.to_string(): raise AuthError(403, "Cannot delete account data for other users.") + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + await self.handler.remove_account_data_for_user(user_id, account_data_type) return 200, {} @@ -164,9 +201,10 @@ class RoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) - body = parse_json_object_from_request(request) - - if account_data_type == "m.fully_read": + # Raise an error if the account data type cannot be set directly. + if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + elif account_data_type == ReceiptTypes.FULLY_READ: raise SynapseError( 405, "Cannot set m.fully_read through this API." @@ -174,6 +212,8 @@ class RoomAccountDataServlet(RestServlet): Codes.BAD_JSON, ) + body = parse_json_object_from_request(request) + # If experimental support for MSC3391 is enabled, then providing an empty dict # as the value for an account data type should be functionally equivalent to # calling the DELETE method on the same type. @@ -208,19 +248,26 @@ class RoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) - event = await self.store.get_account_data_for_room_and_type( - user_id, room_id, account_data_type - ) + # Room-specific push rules are not currently supported. + if ( + self._hs.config.experimental.msc4010_push_rules_account_data + and account_data_type == AccountDataTypes.PUSH_RULES + ): + account_data: Optional[JsonDict] = {} + else: + account_data = await self.store.get_account_data_for_room_and_type( + user_id, room_id, account_data_type + ) - if event is None: + if account_data is None: raise NotFoundError("Room account data not found") # If experimental support for MSC3391 is enabled, then this endpoint should # return a 404 if the content for an account data type is an empty dict. - if self._hs.config.experimental.msc3391_enabled and event == {}: + if self._hs.config.experimental.msc3391_enabled and account_data == {}: raise NotFoundError("Room account data not found") - return 200, event + return 200, account_data class UnstableRoomAccountDataServlet(RestServlet): @@ -240,6 +287,7 @@ class UnstableRoomAccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() + self._hs = hs self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() @@ -261,6 +309,10 @@ class UnstableRoomAccountDataServlet(RestServlet): Codes.INVALID_PARAM, ) + # Raise an error if the account data type cannot be set directly. 
+ if self._hs.config.experimental.msc4010_push_rules_account_data: + _check_can_set_account_data_type(account_data_type) + await self.handler.remove_account_data_for_room( user_id, room_id, account_data_type ) diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py index 31466a4ad4..3f553d14d1 100644 --- a/synapse/rest/client/appservice_ping.py +++ b/synapse/rest/client/appservice_ping.py @@ -39,9 +39,8 @@ logger = logging.getLogger(__name__) class AppservicePingRestServlet(RestServlet): PATTERNS = client_patterns( - "/fi.mau.msc2659/appservice/(?P<appservice_id>[^/]*)/ping", - unstable=True, - releases=(), + "/appservice/(?P<appservice_id>[^/]*)/ping", + releases=("v1",), ) def __init__(self, hs: "HomeServer"): @@ -107,9 +106,8 @@ class AppservicePingRestServlet(RestServlet): duration = time.monotonic() - start - return HTTPStatus.OK, {"duration": int(duration * 1000)} + return HTTPStatus.OK, {"duration_ms": int(duration * 1000)} def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc2659_enabled: - AppservicePingRestServlet(hs).register(http_server) + AppservicePingRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 11fc0b0678..a77b0697b7 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -33,6 +33,7 @@ class CapabilitiesRestServlet(RestServlet): """End point to expose the capabilities of the server.""" PATTERNS = client_patterns("/capabilities$") + CATEGORY = "Client API requests" def __init__(self, hs: "HomeServer"): super().__init__() diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index f17b4c8d22..570bb52747 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -39,12 +39,14 @@ logger = logging.getLogger(__name__) def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ClientDirectoryServer(hs).register(http_server) - ClientDirectoryListServer(hs).register(http_server) - ClientAppserviceDirectoryListServer(hs).register(http_server) + if hs.config.worker.worker_app is None: + ClientDirectoryListServer(hs).register(http_server) + ClientAppserviceDirectoryListServer(hs).register(http_server) class ClientDirectoryServer(RestServlet): PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True) + CATEGORY = "Client API requests" def __init__(self, hs: "HomeServer"): super().__init__() diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index ab7d8c9419..04561f36d7 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -94,7 +94,7 @@ class CreateFilterRestServlet(RestServlet): set_timeline_upper_limit(content, self.hs.config.server.filter_timeline_limit) filter_id = await self.filtering.add_user_filter( - user_localpart=target_user.localpart, user_filter=content + user_id=target_user, user_filter=content ) return 200, {"filter_id": str(filter_id)} diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 6209b79b01..9bbab5e624 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -15,7 +15,9 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Any, Optional, Tuple +import re +from collections import Counter +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple from synapse.api.errors import InvalidAPICallError, SynapseError from synapse.http.server import HttpServer @@ -288,7 +290,64 @@ class OneTimeKeyServlet(RestServlet): await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) - result = await self.e2e_keys_handler.claim_one_time_keys(body, timeout) + + # Generate a count for each algorithm, which is hard-coded to 1. + query: Dict[str, Dict[str, Dict[str, int]]] = {} + for user_id, one_time_keys in body.get("one_time_keys", {}).items(): + for device_id, algorithm in one_time_keys.items(): + query.setdefault(user_id, {})[device_id] = {algorithm: 1} + + result = await self.e2e_keys_handler.claim_one_time_keys( + query, timeout, always_include_fallback_keys=False + ) + return 200, result + + +class UnstableOneTimeKeyServlet(RestServlet): + """ + Identical to the stable endpoint (OneTimeKeyServlet) except it allows for + querying for multiple OTKs at once and always includes fallback keys in the + response. + + POST /keys/claim HTTP/1.1 + { + "one_time_keys": { + "<user_id>": { + "<device_id>": ["<algorithm>", ...] + } } } + + HTTP/1.1 200 OK + { + "one_time_keys": { + "<user_id>": { + "<device_id>": { + "<algorithm>:<key_id>": "<key_base64>" + } } } } + + """ + + PATTERNS = [re.compile(r"^/_matrix/client/unstable/org.matrix.msc3983/keys/claim$")] + CATEGORY = "Encryption requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.e2e_keys_handler = hs.get_e2e_keys_handler() + + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + await self.auth.get_user_by_req(request, allow_guest=True) + timeout = parse_integer(request, "timeout", 10 * 1000) + body = parse_json_object_from_request(request) + + # Generate a count for each algorithm. + query: Dict[str, Dict[str, Dict[str, int]]] = {} + for user_id, one_time_keys in body.get("one_time_keys", {}).items(): + for device_id, algorithms in one_time_keys.items(): + query.setdefault(user_id, {})[device_id] = Counter(algorithms) + + result = await self.e2e_keys_handler.claim_one_time_keys( + query, timeout, always_include_fallback_keys=True + ) return 200, result @@ -394,6 +453,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: KeyQueryServlet(hs).register(http_server) KeyChangesServlet(hs).register(http_server) OneTimeKeyServlet(hs).register(http_server) + if hs.config.experimental.msc3983_appservice_otk_claims: + UnstableOneTimeKeyServlet(hs).register(http_server) if hs.config.worker.worker_app is None: SigningKeyUploadServlet(hs).register(http_server) SignaturesUploadServlet(hs).register(http_server) diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index e04d4f2425..7d7714abb8 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( LoginError, NotApprovedError, SynapseError, + UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX @@ -84,14 +85,10 @@ class LoginRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs + self._main_store = hs.get_datastores().main # JWT configuration variables. 
        self.jwt_enabled = hs.config.jwt.jwt_enabled
-        self.jwt_secret = hs.config.jwt.jwt_secret
-        self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim
-        self.jwt_algorithm = hs.config.jwt.jwt_algorithm
-        self.jwt_issuer = hs.config.jwt.jwt_issuer
-        self.jwt_audiences = hs.config.jwt.jwt_audiences
 
         # SSO configuration.
         self.saml2_enabled = hs.config.saml2.saml2_enabled
@@ -120,13 +117,13 @@ class LoginRestServlet(RestServlet):
         self._well_known_builder = WellKnownBuilder(hs)
         self._address_ratelimiter = Ratelimiter(
-            store=hs.get_datastores().main,
+            store=self._main_store,
             clock=hs.get_clock(),
             rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second,
             burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count,
         )
         self._account_ratelimiter = Ratelimiter(
-            store=hs.get_datastores().main,
+            store=self._main_store,
             clock=hs.get_clock(),
             rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second,
             burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count,
         )
@@ -292,6 +289,9 @@ class LoginRestServlet(RestServlet):
             login_submission,
             ratelimit=appservice.is_rate_limited(),
             should_issue_refresh_token=should_issue_refresh_token,
+            # The user represented by an appservice's configured sender_localpart
+            # is not actually created in Synapse.
+            should_check_deactivated=qualified_user_id != appservice.sender,
         )
 
     async def _do_other_login(
@@ -338,6 +338,7 @@ class LoginRestServlet(RestServlet):
         auth_provider_id: Optional[str] = None,
         should_issue_refresh_token: bool = False,
         auth_provider_session_id: Optional[str] = None,
+        should_check_deactivated: bool = True,
     ) -> LoginResponse:
         """Called when we've successfully authed the user and now need to
         actually login them in (e.g. create devices). This gets called on
@@ -357,6 +358,11 @@ class LoginRestServlet(RestServlet):
             should_issue_refresh_token: True if this login should issue
                 a refresh token alongside the access token.
             auth_provider_session_id: The session ID got during login from the SSO IdP.
+            should_check_deactivated: True if the user should be checked for
+                deactivation status before logging in.
+
+                This exists purely for an appservice's configured sender_localpart
+                which doesn't have an associated user in the database.
 
         Returns:
             Dictionary of account information after successful login.
@@ -376,6 +382,12 @@ class LoginRestServlet(RestServlet):
             )
             user_id = canonical_uid
 
+        # If the account has been deactivated, do not proceed with the login.
+        if should_check_deactivated:
+            deactivated = await self._main_store.get_user_deactivated_status(user_id)
+            if deactivated:
+                raise UserDeactivatedError("This account has been deactivated")
+
         device_id = login_submission.get("device_id")
 
         # If device_id is present, check that device_id is not longer than a reasonable 512 characters
@@ -434,7 +446,7 @@ class LoginRestServlet(RestServlet):
         self, login_submission: JsonDict, should_issue_refresh_token: bool = False
     ) -> LoginResponse:
         """
-        Handle the final stage of SSO login.
+        Handle token login.
 
         Args:
             login_submission: The JSON request body.
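For reference, the token login handler above consumes the spec-defined `m.login.token` submission; an illustrative body (the token value is opaque and issued out of band, e.g. at the end of an SSO flow):

    # Illustrative m.login.token submission handled by _do_token_login.
    login_submission = {
        "type": "m.login.token",
        "token": "<opaque login token>",
    }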
@@ -459,72 +471,24 @@ class LoginRestServlet(RestServlet): async def _do_jwt_login( self, login_submission: JsonDict, should_issue_refresh_token: bool = False ) -> LoginResponse: - token = login_submission.get("token", None) - if token is None: - raise LoginError( - 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN - ) - - from authlib.jose import JsonWebToken, JWTClaims - from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError - - jwt = JsonWebToken([self.jwt_algorithm]) - claim_options = {} - if self.jwt_issuer is not None: - claim_options["iss"] = {"value": self.jwt_issuer, "essential": True} - if self.jwt_audiences is not None: - claim_options["aud"] = {"values": self.jwt_audiences, "essential": True} - - try: - claims = jwt.decode( - token, - key=self.jwt_secret, - claims_cls=JWTClaims, - claims_options=claim_options, - ) - except BadSignatureError: - # We handle this case separately to provide a better error message - raise LoginError( - 403, - "JWT validation failed: Signature verification failed", - errcode=Codes.FORBIDDEN, - ) - except JoseError as e: - # A JWT error occurred, return some info back to the client. - raise LoginError( - 403, - "JWT validation failed: %s" % (str(e),), - errcode=Codes.FORBIDDEN, - ) - - try: - claims.validate(leeway=120) # allows 2 min of clock skew - - # Enforce the old behavior which is rolled out in productive - # servers: if the JWT contains an 'aud' claim but none is - # configured, the login attempt will fail - if claims.get("aud") is not None: - if self.jwt_audiences is None or len(self.jwt_audiences) == 0: - raise InvalidClaimError("aud") - except JoseError as e: - raise LoginError( - 403, - "JWT validation failed: %s" % (str(e),), - errcode=Codes.FORBIDDEN, - ) + """ + Handle the custom JWT login. - user = claims.get(self.jwt_subject_claim, None) - if user is None: - raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) + Args: + login_submission: The JSON request body. + should_issue_refresh_token: True if this login should issue + a refresh token alongside the access token. - user_id = UserID(user, self.hs.hostname).to_string() - result = await self._complete_login( + Returns: + The body of the JSON response. 
+ """ + user_id = self.hs.get_jwt_handler().validate_login(login_submission) + return await self._complete_login( user_id, login_submission, create_non_existent_users=True, should_issue_refresh_token=should_issue_refresh_token, ) - return result def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict: @@ -677,9 +641,17 @@ class CasTicketServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: LoginRestServlet(hs).register(http_server) - if hs.config.registration.refreshable_access_token_lifetime is not None: + if ( + hs.config.worker.worker_app is None + and hs.config.registration.refreshable_access_token_lifetime is not None + ): RefreshTokenServlet(hs).register(http_server) - SsoRedirectServlet(hs).register(http_server) + if ( + hs.config.cas.cas_enabled + or hs.config.saml2.saml2_enabled + or hs.config.oidc.oidc_enabled + ): + SsoRedirectServlet(hs).register(http_server) if hs.config.cas.cas_enabled: CasTicketServlet(hs).register(http_server) diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index 38ef4e459f..c99445da30 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Tuple +from http import HTTPStatus +from typing import TYPE_CHECKING, Dict, List, Tuple from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_strings_from_args from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict from ._base import client_patterns @@ -30,11 +31,11 @@ logger = logging.getLogger(__name__) class UserMutualRoomsServlet(RestServlet): """ - GET /uk.half-shot.msc2666/user/mutual_rooms/{user_id} HTTP/1.1 + GET /uk.half-shot.msc2666/user/mutual_rooms?user_id={user_id} HTTP/1.1 """ PATTERNS = client_patterns( - "/uk.half-shot.msc2666/user/mutual_rooms/(?P<user_id>[^/]*)", + "/uk.half-shot.msc2666/user/mutual_rooms$", releases=(), # This is an unstable feature ) @@ -43,17 +44,35 @@ class UserMutualRoomsServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET( - self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: - UserID.from_string(user_id) + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + args: Dict[bytes, List[bytes]] = request.args # type: ignore + + user_ids = parse_strings_from_args(args, "user_id", required=True) + + if len(user_ids) > 1: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Duplicate user_id query parameter", + errcode=Codes.INVALID_PARAM, + ) + + # We don't do batching, so a batch token is illegal by default + if b"batch_token" in args: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Unknown batch_token", + errcode=Codes.INVALID_PARAM, + ) + + user_id = user_ids[0] requester = await self.auth.get_user_by_req(request) if user_id == requester.user.to_string(): raise SynapseError( - code=400, - msg="You cannot request a list of shared rooms with yourself", - errcode=Codes.FORBIDDEN, + HTTPStatus.UNPROCESSABLE_ENTITY, + "You cannot request a list of shared rooms with yourself", + errcode=Codes.INVALID_PARAM, ) rooms = 
await self.store.get_mutual_rooms_between_users( diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 1147b6f8ec..5c9fece3ba 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -28,7 +28,6 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest.client._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException @@ -146,14 +145,12 @@ class PushRuleRestServlet(RestServlet): async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() + requester.user.to_string() # we build up the full structure and then decide which bits of it # to send which means doing unnecessary work sometimes but is # is probably not going to make a whole lot of difference - rules_raw = await self.store.get_push_rules_for_user(user_id) - - rules = format_push_rules_for_user(requester.user, rules_raw) + rules = await self._push_rules_handler.push_rules_for_user(requester.user) path_parts = path.split("/")[1:] diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index b8b296bc0c..785dfa08d8 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import Direction from synapse.handlers.relations import ThreadsListInclude from synapse.http.server import HttpServer -from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns from synapse.storage.databases.main.relations import ThreadsNextBatch @@ -49,6 +49,7 @@ class RelationPaginationServlet(RestServlet): self.auth = hs.get_auth() self._store = hs.get_datastores().main self._relations_handler = hs.get_relations_handler() + self._support_recurse = hs.config.experimental.msc3981_recurse_relations async def on_GET( self, @@ -63,6 +64,12 @@ class RelationPaginationServlet(RestServlet): pagination_config = await PaginationConfig.from_request( self._store, request, default_limit=5, default_dir=Direction.BACKWARDS ) + if self._support_recurse: + recurse = parse_boolean( + request, "org.matrix.msc3981.recurse", default=False + ) + else: + recurse = False # The unstable version of this API returns an extra field for client # compatibility, see https://github.com/matrix-org/synapse/issues/12930. @@ -75,6 +82,7 @@ class RelationPaginationServlet(RestServlet): event_id=parent_id, room_id=room_id, pagin_config=pagination_config, + recurse=recurse, include_original_event=include_original_event, relation_type=relation_type, event_type=event_type, diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index c0705d4291..951bd033f5 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -501,7 +501,7 @@ class PublicRoomListRestServlet(RestServlet): limit = None handler = self.hs.get_room_list_handler() - if server and server != self.hs.config.server.server_name: + if server and not self.hs.is_mine_server_name(server): # Ensure the server is valid. 
try: parse_and_validate_server_name(server) @@ -551,7 +551,7 @@ class PublicRoomListRestServlet(RestServlet): limit = None handler = self.hs.get_room_list_handler() - if server and server != self.hs.config.server.server_name: + if server and not self.hs.is_mine_server_name(server): # Ensure the server is valid. try: parse_and_validate_server_name(server) @@ -1096,6 +1096,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() + self._store = hs.get_datastores().main self._relation_handler = hs.get_relations_handler() self._msc3912_enabled = hs.config.experimental.msc3912_enabled @@ -1113,6 +1114,19 @@ class RoomRedactEventRestServlet(TransactionRestServlet): ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) + # Ensure the redacts property in the content matches the one provided in + # the URL. + room_version = await self._store.get_room_version(room_id) + if room_version.msc2176_redaction_rules: + if "redacts" in content and content["redacts"] != event_id: + raise SynapseError( + 400, + "Cannot provide a redacts value incoherent with the event_id of the URL parameter", + Codes.INVALID_PARAM, + ) + else: + content["redacts"] = event_id + try: with_relations = None if self._msc3912_enabled and "org.matrix.msc3912.with_relations" in content: @@ -1128,20 +1142,23 @@ class RoomRedactEventRestServlet(TransactionRestServlet): requester, txn_id, room_id ) + # Event is not yet redacted, create a new event to redact it. if event is None: + event_dict = { + "type": EventTypes.Redaction, + "content": content, + "room_id": room_id, + "sender": requester.user.to_string(), + } + # Earlier room versions had a top-level redacts property. + if not room_version.msc2176_redaction_rules: + event_dict["redacts"] = event_id + ( event, _, ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.Redaction, - "content": content, - "room_id": room_id, - "sender": requester.user.to_string(), - "redacts": event_id, - }, - txn_id=txn_id, + requester, event_dict, txn_id=txn_id ) if with_relations: diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index f2aaab6227..0d8a63d8be 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -50,6 +50,8 @@ class HttpTransactionCache: # for at *LEAST* 30 mins, and at *MOST* 60 mins. self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) + self._msc3970_enabled = hs.config.experimental.msc3970_enabled + def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used with TransactionCache for idempotent requests. @@ -58,6 +60,7 @@ class HttpTransactionCache: requests to the same endpoint. The key is formed from the HTTP request path and attributes from the requester: the access_token_id for regular users, the user ID for guest users, and the appservice ID for appservice users. + With MSC3970, for regular users, the key is based on the user ID and device ID. Args: request: The incoming request. 
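Concretely, `_get_transaction_key` yields differently shaped tuples per requester type, as the docstring above describes; illustrative values for a single request path (the MSC3970 shape only applies when the experimental flag above is set):

    # Illustrative transaction keys for one request path, by requester type.
    path = "/_matrix/client/v3/rooms/!room:example.org/send/m.room.message/txn1"
    key_guest = (path, "guest", requester.user)
    key_appservice = (path, "appservice", requester.app_service.id)
    key_msc3970 = (path, "user", requester.user, requester.device_id)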
@@ -67,11 +70,21 @@ class HttpTransactionCache:
         """
         assert request.path is not None
         path: str = request.path.decode("utf8")
+
         if requester.is_guest:
             assert requester.user is not None, "Guest requester must have a user ID set"
             return (path, "guest", requester.user)
+
         elif requester.app_service is not None:
             return (path, "appservice", requester.app_service.id)
+
+        # With MSC3970, we use the user ID and device ID as the transaction key
+        elif self._msc3970_enabled:
+            assert requester.user, "Requester must have a user"
+            assert requester.device_id, "Requester must have a device_id"
+            return (path, "user", requester.user, requester.device_id)
+
+        # Otherwise, the pre-MSC3970 behaviour is to use the access token ID
         else:
             assert (
                 requester.access_token_id is not None
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index ecd84f435f..1eb11081a0 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -79,6 +79,7 @@ class VersionsRestServlet(RestServlet):
                     "v1.3",
                     "v1.4",
                     "v1.5",
+                    "v1.6",
                 ],
                 # as per MSC1497:
                 "unstable_features": {
@@ -90,7 +91,7 @@ class VersionsRestServlet(RestServlet):
                     # Implements additional endpoints as described in MSC2432
                     "org.matrix.msc2432": True,
                     # Implements additional endpoints as described in MSC2666
-                    "uk.half-shot.msc2666.mutual_rooms": True,
+                    "uk.half-shot.msc2666.query_mutual_rooms": True,
                     # Whether new rooms will be set to encrypted or not (based on presets).
                     "io.element.e2ee_forced.public": self.e2ee_forced_public,
                     "io.element.e2ee_forced.private": self.e2ee_forced_private,
@@ -111,7 +112,7 @@ class VersionsRestServlet(RestServlet):
                     # Allows moderators to fetch redacted event content as described in MSC2815
                     "fi.mau.msc2815": self.config.experimental.msc2815_enabled,
                     # Adds a ping endpoint for appservices to check HS->AS connection
-                    "fi.mau.msc2659": self.config.experimental.msc2659_enabled,
+                    "fi.mau.msc2659.stable": True,  # TODO: remove when "v1.7" is added above
                     # Adds support for remotely enabling/disabling pushers, as per MSC3881
                     "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
                     # Adds support for filtering /messages by event relation.
@@ -123,6 +124,10 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.msc3912": self.config.experimental.msc3912_enabled,
                     # Adds support for unstable "intentional mentions" behaviour.
                     "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions,
+                    # Whether recursively providing relations is supported.
+                    "org.matrix.msc3981": self.config.experimental.msc3981_recurse_relations,
+                    # Adds support for deleting account data.
+                    "org.matrix.msc3391": self.config.experimental.msc3391_enabled,
                 },
             },
         )
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index d03e728d42..22e7bf9d86 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -34,6 +34,8 @@ class LocalKey(RestServlet):
     """HTTP resource containing encoding the TLS X.509 certificate and NACL
     signature verification keys for this server::
 
+        GET /_matrix/key/v2/server HTTP/1.1
+
         GET /_matrix/key/v2/server/a.key.id HTTP/1.1
 
         HTTP/1.1 200 OK
@@ -100,6 +102,15 @@ class LocalKey(RestServlet):
     def on_GET(
         self, request: Request, key_id: Optional[str] = None
     ) -> Tuple[int, JsonDict]:
+        # Matrix 1.6 drops support for passing the key_id; it is still accepted
+        # here for backwards compatibility with earlier versions.
+        # A warning is issued to help determine when it is safe to drop this.
+        if key_id:
+            logger.warning(
+                "Request for local server key with deprecated key ID (logging to determine usage level for future removal): %s",
+                key_id,
+            )
+
         time_now = self.clock.time_msec()
         # Update the expiry time if less than half the interval remains.
         if time_now + self.config.key.key_refresh_interval / 2 > self.valid_until_ts:
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 3bdb6ec909..8f3865d412 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -126,6 +126,15 @@ class RemoteKey(RestServlet):
         self, request: Request, server: str, key_id: Optional[str] = None
     ) -> Tuple[int, JsonDict]:
         if server and key_id:
+            # Matrix 1.6 drops support for passing the key_id; this is incompatible
+            # with earlier versions, so it is still accepted in order to support both.
+            # A warning is issued to help determine when it is safe to drop this.
+            logger.warning(
+                "Request for remote server key with deprecated key ID (logging to determine usage level for future removal): %s / %s",
+                server,
+                key_id,
+            )
+
         minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
         arguments = {}
         if minimum_valid_until_ts is not None:
@@ -155,13 +164,13 @@ class RemoteKey(RestServlet):
             for key_id in key_ids:
                 store_queries.append((server_name, key_id, None))
 
-        cached = await self.store.get_server_keys_json(store_queries)
+        cached = await self.store.get_server_keys_json_for_remote(store_queries)
 
         json_results: Set[bytes] = set()
 
         time_now_ms = self.clock.time_msec()
 
-        # Map server_name->key_id->int. Note that the value of the init is unused.
+        # Map server_name->key_id->int. Note that the value of the int is unused.
         # XXX: why don't we just use a set?
         cache_misses: Dict[str, Dict[str, int]] = {}
         for (server_name, key_id, _), key_results in cached.items():
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index 8f270cf4cc..3c618ef60a 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -37,7 +37,7 @@ class DownloadResource(DirectServeJsonResource):
     def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
         super().__init__()
         self.media_repo = media_repo
-        self.server_name = hs.hostname
+        self._is_mine_server_name = hs.is_mine_server_name
 
     async def _async_render_GET(self, request: SynapseRequest) -> None:
         set_cors_headers(request)
@@ -59,7 +59,7 @@ class DownloadResource(DirectServeJsonResource):
             b"no-referrer",
         )
         server_name, media_id, name = parse_media_id(request)
-        if server_name == self.server_name:
+        if self._is_mine_server_name(server_name):
             await self.media_repo.get_local_media(request, media_id, name)
         else:
             allow_remote = parse_boolean(request, "allow_remote", default=True)
diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index 4ee2a0dbda..661e604b85 100644
--- a/synapse/rest/media/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -59,7 +59,8 @@ class ThumbnailResource(DirectServeJsonResource):
         self.media_repo = media_repo
         self.media_storage = media_storage
         self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
-        self.server_name = hs.hostname
+        self._is_mine_server_name = hs.is_mine_server_name
+        self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
 
     async def _async_render_GET(self, request: SynapseRequest) -> None:
         set_cors_headers(request)
@@ -71,7 +72,7 @@ class ThumbnailResource(DirectServeJsonResource):
         # TODO Parse the Accept header to get a prioritised list of thumbnail types.
         m_type = "image/png"
 
-        if server_name == self.server_name:
+        if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self._select_or_generate_local_thumbnail(
                     request, media_id, width, height, method, m_type
@@ -82,6 +83,14 @@ class ThumbnailResource(DirectServeJsonResource):
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
+            # Don't let users download media from configured domains, even if it
+            # has already been downloaded. This is Trust & Safety tooling to make
+            # some media inaccessible to local users.
+            # See `prevent_media_downloads_from` config docs for more info.
+            if server_name in self.prevent_media_downloads_from:
+                respond_404(request)
+                return
+
             if self.dynamic_thumbnails:
                 await self._select_or_generate_remote_thumbnail(
                     request, server_name, media_id, width, height, method, m_type
diff --git a/synapse/server.py b/synapse/server.py
index a191c19993..f6e245569c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -42,8 +42,6 @@
 from synapse.crypto.context_factory import RegularPolicyForHTTPS
 from synapse.crypto.keyring import Keyring
 from synapse.events.builder import EventBuilderFactory
 from synapse.events.presence_router import PresenceRouter
-from synapse.events.spamcheck import SpamChecker
-from synapse.events.third_party_rules import ThirdPartyEventRules
 from synapse.events.utils import EventClientSerializer
 from synapse.federation.federation_client import FederationClient
 from synapse.federation.federation_server import (
@@ -94,7 +92,11 @@ from synapse.handlers.room import (
 )
 from synapse.handlers.room_batch import RoomBatchHandler
 from synapse.handlers.room_list import RoomListHandler
-from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler
+from synapse.handlers.room_member import (
+    RoomForgetterHandler,
+    RoomMemberHandler,
+    RoomMemberMasterHandler,
+)
 from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
 from synapse.handlers.room_summary import RoomSummaryHandler
 from synapse.handlers.search import SearchHandler
@@ -105,7 +107,11 @@
 from synapse.handlers.stats import StatsHandler
 from synapse.handlers.sync import SyncHandler
 from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler
 from synapse.handlers.user_directory import UserDirectoryHandler
-from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
+from synapse.http.client import (
+    InsecureInterceptableContextFactory,
+    ReplicationClient,
+    SimpleHttpClient,
+)
 from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.media.media_repository import MediaRepository
 from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
@@ -141,6 +147,7 @@
 logger = logging.getLogger(__name__)
 
 if TYPE_CHECKING:
     from txredisapi import ConnectionHandler
 
+    from synapse.handlers.jwt import JwtHandler
     from synapse.handlers.oidc import OidcHandler
     from synapse.handlers.saml import SamlHandler
 
@@ -233,6 +240,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         "message",
         "pagination",
         "profile",
+        "room_forgetter",
         "stats",
     ]
 
@@ -374,6 +382,10 @@ class HomeServer(metaclass=abc.ABCMeta):
             return False
         return localpart_hostname[1] == self.hostname
 
+    def is_mine_server_name(self, server_name: str) -> bool:
+        """Determines whether a server name refers to this homeserver."""
+        return server_name == self.hostname
+
     @cache_in_self
     def get_clock(self) -> Clock:
         return Clock(self._reactor)
@@ -442,15 +454,15 @@ class HomeServer(metaclass=abc.ABCMeta):
         return SimpleHttpClient(self, use_proxy=True)
 
     @cache_in_self
-    def get_proxied_blacklisted_http_client(self) -> SimpleHttpClient:
+    def get_proxied_blocklisted_http_client(self) -> SimpleHttpClient:
         """
-        An HTTP client that uses configured HTTP(S) proxies and blacklists IPs
-        based on the IP range blacklist/whitelist.
+        An HTTP client that uses configured HTTP(S) proxies and blocks IPs
+        based on the configured IP ranges.
""" return SimpleHttpClient( self, - ip_whitelist=self.config.server.ip_range_whitelist, - ip_blacklist=self.config.server.ip_range_blacklist, + ip_allowlist=self.config.server.ip_range_allowlist, + ip_blocklist=self.config.server.ip_range_blocklist, use_proxy=True, ) @@ -465,6 +477,13 @@ class HomeServer(metaclass=abc.ABCMeta): return MatrixFederationHttpClient(self, tls_client_options_factory) @cache_in_self + def get_replication_client(self) -> ReplicationClient: + """ + An HTTP client for HTTP replication. + """ + return ReplicationClient(self) + + @cache_in_self def get_room_creation_handler(self) -> RoomCreationHandler: return RoomCreationHandler(self) @@ -516,6 +535,12 @@ class HomeServer(metaclass=abc.ABCMeta): return SsoHandler(self) @cache_in_self + def get_jwt_handler(self) -> "JwtHandler": + from synapse.handlers.jwt import JwtHandler + + return JwtHandler(self) + + @cache_in_self def get_sync_handler(self) -> SyncHandler: return SyncHandler(self) @@ -688,14 +713,6 @@ class HomeServer(metaclass=abc.ABCMeta): return StatsHandler(self) @cache_in_self - def get_spam_checker(self) -> SpamChecker: - return SpamChecker(self) - - @cache_in_self - def get_third_party_event_rules(self) -> ThirdPartyEventRules: - return ThirdPartyEventRules(self) - - @cache_in_self def get_password_auth_provider(self) -> PasswordAuthProvider: return PasswordAuthProvider() @@ -767,7 +784,9 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer( + msc3970_enabled=self.config.experimental.msc3970_enabled + ) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: @@ -803,7 +822,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_module_api_callbacks(self) -> ModuleApiCallbacks: - return ModuleApiCallbacks() + return ModuleApiCallbacks(self) @cache_in_self def get_account_data_handler(self) -> AccountDataHandler: @@ -830,6 +849,10 @@ class HomeServer(metaclass=abc.ABCMeta): return PushRulesHandler(self) @cache_in_self + def get_room_forgetter_handler(self) -> RoomForgetterHandler: + return RoomForgetterHandler(self) + + @cache_in_self def get_outbound_redis_connection(self) -> "ConnectionHandler": """ The Redis connection used for replication. 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 6031095249..9bc0c3b7b9 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -45,6 +45,7 @@ from synapse.events.snapshot import (
     UnpersistedEventContextBase,
 )
 from synapse.logging.context import ContextResourceUsage
+from synapse.logging.opentracing import tag_args, trace
 from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
 from synapse.state import v1, v2
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
@@ -270,6 +271,8 @@ class StateHandler:
         state = await entry.get_state(self._state_storage_controller, StateFilter.all())
         return await self.store.get_joined_hosts(room_id, state, entry)
 
+    @trace
+    @tag_args
     async def calculate_context_info(
         self,
         event: EventBase,
@@ -465,6 +468,7 @@
 
         return await unpersisted_context.persist(event)
 
+    @trace
     @measure_func()
     async def resolve_state_groups_for_events(
         self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
deleted file mode 100644
index 27bbd76f51..0000000000
--- a/synapse/static/client/register/index.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!doctype html>
-<html>
-<head>
-<title> Registration </title>
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-<link rel="stylesheet" href="style.css">
-<script src="js/jquery-3.4.1.min.js"></script>
-<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
-<script src="register_config.js"></script>
-<script src="js/register.js"></script>
-</head>
-<body onload="matrixRegistration.onLoad()">
-<form id="registrationForm" onsubmit="matrixRegistration.signUp(); return false;">
-    <div>
-        Create account:<br/>
-
-        <div style="text-align: center">
-            <input id="desired_user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
-            <br/>
-            <input id="pwd1" size="32" type="password" placeholder="Type a password"/>
-            <br/>
-            <input id="pwd2" size="32" type="password" placeholder="Confirm your password"/>
-            <br/>
-            <span id="feedback" style="color: #f00"></span>
-            <br/>
-            <div id="regcaptcha"></div>
-
-            <button type="submit" style="margin: 10px">Sign up</button>
-        </div>
-    </div>
-</form>
-</body>
-</html>
diff --git a/synapse/static/client/register/js/jquery-3.4.1.min.js b/synapse/static/client/register/js/jquery-3.4.1.min.js
deleted file mode 100644
index a1c07fd803..0000000000
--- a/synapse/static/client/register/js/jquery-3.4.1.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
-[minified jQuery 3.4.1 source elided: one machine-generated line of vendored library code]
e=C.getComputedStyle(u);n="1%"!==e.top,a=12===t(e.marginLeft),u.style.right="60%",o=36===t(e.right),r=36===t(e.width),u.style.position="absolute",i=12===t(u.offsetWidth/3),ie.removeChild(s),u=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s=E.createElement("div"),u=E.createElement("div");u.style&&(u.style.backgroundClip="content-box",u.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===u.style.backgroundClip,k.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),a},scrollboxSize:function(){return e(),i}}))}();var Ue=["Webkit","Moz","ms"],Xe=E.createElement("div").style,Ve={};function Ge(e){var t=k.cssProps[e]||Ve[e];return t||(e in Xe?e:Ve[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=Ue.length;while(n--)if((e=Ue[n]+t)in Xe)return e}(e)||e)}var Ye=/^(none|table(?!-c[ea]).+)/,Qe=/^--/,Je={position:"absolute",visibility:"hidden",display:"block"},Ke={letterSpacing:"0",fontWeight:"400"};function Ze(e,t,n){var r=ne.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function et(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=k.css(e,n+re[a],!0,i)),r?("content"===n&&(u-=k.css(e,"padding"+re[a],!0,i)),"margin"!==n&&(u-=k.css(e,"border"+re[a]+"Width",!0,i))):(u+=k.css(e,"padding"+re[a],!0,i),"padding"!==n?u+=k.css(e,"border"+re[a]+"Width",!0,i):s+=k.css(e,"border"+re[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function tt(e,t,n){var r=Fe(e),i=(!y.boxSizingReliable()||n)&&"border-box"===k.css(e,"boxSizing",!1,r),o=i,a=_e(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if($e.test(a)){if(!n)return a;a="auto"}return(!y.boxSizingReliable()&&i||"auto"===a||!parseFloat(a)&&"inline"===k.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===k.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+et(e,t,n||(i?"border":"content"),o,r,a)+"px"}function nt(e,t,n,r,i){return new nt.prototype.init(e,t,n,r,i)}k.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=_e(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=V(t),u=Qe.test(t),l=e.style;if(u||(t=Ge(s)),a=k.cssHooks[t]||k.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=ne.exec(n))&&i[1]&&(n=le(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(k.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=V(t);return Qe.test(t)||(t=Ge(s)),(a=k.cssHooks[t]||k.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=_e(e,t,r)),"normal"===i&&t in Ke&&(i=Ke[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),k.each(["height","width"],function(e,u){k.cssHooks[u]={get:function(e,t,n){if(t)return!Ye.test(k.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?tt(e,u,n):ue(e,Je,function(){return 
tt(e,u,n)})},set:function(e,t,n){var r,i=Fe(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===k.css(e,"boxSizing",!1,i),s=n?et(e,u,n,a,i):0;return a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-et(e,u,"border",!1,i)-.5)),s&&(r=ne.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=k.css(e,u)),Ze(0,t,s)}}}),k.cssHooks.marginLeft=ze(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(_e(e,"marginLeft"))||e.getBoundingClientRect().left-ue(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),k.each({margin:"",padding:"",border:"Width"},function(i,o){k.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+re[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(k.cssHooks[i+o].set=Ze)}),k.fn.extend({css:function(e,t){return _(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Fe(e),i=t.length;a<i;a++)o[t[a]]=k.css(e,t[a],!1,r);return o}return void 0!==n?k.style(e,t,n):k.css(e,t)},e,t,1<arguments.length)}}),((k.Tween=nt).prototype={constructor:nt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||k.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(k.cssNumber[n]?"":"px")},cur:function(){var e=nt.propHooks[this.prop];return e&&e.get?e.get(this):nt.propHooks._default.get(this)},run:function(e){var t,n=nt.propHooks[this.prop];return this.options.duration?this.pos=t=k.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):nt.propHooks._default.set(this),this}}).init.prototype=nt.prototype,(nt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=k.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){k.fx.step[e.prop]?k.fx.step[e.prop](e):1!==e.elem.nodeType||!k.cssHooks[e.prop]&&null==e.elem.style[Ge(e.prop)]?e.elem[e.prop]=e.now:k.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=nt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},k.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},k.fx=nt.prototype.init,k.fx.step={};var rt,it,ot,at,st=/^(?:toggle|show|hide)$/,ut=/queueHooks$/;function lt(){it&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(lt):C.setTimeout(lt,k.fx.interval),k.fx.tick())}function ct(){return C.setTimeout(function(){rt=void 0}),rt=Date.now()}function ft(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=re[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function pt(e,t,n){for(var r,i=(dt.tweeners[t]||[]).concat(dt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function dt(o,e,t){var n,a,r=0,i=dt.prefilters.length,s=k.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var e=rt||ct(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:k.extend({},e),opts:k.extend(!0,{specialEasing:{},easing:k.easing._default},t),originalProperties:e,originalOptions:t,startTime:rt||ct(),duration:t.duration,tweens:[],createTween:function(e,t){var 
n=k.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var n,r,i,o,a;for(n in e)if(i=t[r=V(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=k.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=dt.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(k._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return k.map(c,pt,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),k.fx.timer(k.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}k.Animation=k.extend(dt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return le(n.elem,e,ne.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(R);for(var n,r=0,i=e.length;r<i;r++)n=e[r],dt.tweeners[n]=dt.tweeners[n]||[],dt.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&se(e),v=Q.get(e,"fxshow");for(r in n.queue||(null==(a=k._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,k.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],st.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||k.style(e,r)}if((u=!k.isEmptyObject(t))||!k.isEmptyObject(d))for(r in f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Q.get(e,"display")),"none"===(c=k.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=k.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===k.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Q.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&fe([e],!0),p.done(function(){for(r in g||fe([e]),Q.remove(e,"fxshow"),d)k.style(e,r,d[r])})),u=pt(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?dt.prefilters.unshift(e):dt.prefilters.push(e)}}),k.speed=function(e,t,n){var r=e&&"object"==typeof e?k.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return k.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in k.fx.speeds?r.duration=k.fx.speeds[r.duration]:r.duration=k.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&k.dequeue(this,r.queue)},r},k.fn.extend({fadeTo:function(e,t,n,r){return this.filter(se).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=k.isEmptyObject(t),o=k.speed(e,n,r),a=function(){var e=dt(this,k.extend({},t),o);(i||Q.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 
0),e&&!1!==i&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=k.timers,r=Q.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in r)r[t]&&r[t].stop&&ut.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||k.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Q.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=k.timers,o=n?n.length:0;for(t.finish=!0,k.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),k.each(["toggle","show","hide"],function(e,r){var i=k.fn[r];k.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(ft(r,!0),e,t,n)}}),k.each({slideDown:ft("show"),slideUp:ft("hide"),slideToggle:ft("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){k.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),k.timers=[],k.fx.tick=function(){var e,t=0,n=k.timers;for(rt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||k.fx.stop(),rt=void 0},k.fx.timer=function(e){k.timers.push(e),k.fx.start()},k.fx.interval=13,k.fx.start=function(){it||(it=!0,lt())},k.fx.stop=function(){it=null},k.fx.speeds={slow:600,fast:200,_default:400},k.fn.delay=function(r,e){return r=k.fx&&k.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},ot=E.createElement("input"),at=E.createElement("select").appendChild(E.createElement("option")),ot.type="checkbox",y.checkOn=""!==ot.value,y.optSelected=at.selected,(ot=E.createElement("input")).value="t",ot.type="radio",y.radioValue="t"===ot.value;var ht,gt=k.expr.attrHandle;k.fn.extend({attr:function(e,t){return _(this,k.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){k.removeAttr(this,e)})}}),k.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?k.prop(e,t,n):(1===o&&k.isXMLDoc(e)||(i=k.attrHooks[t.toLowerCase()]||(k.expr.match.bool.test(t)?ht:void 0)),void 0!==n?null===n?void k.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=k.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(R);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),ht={set:function(e,t,n){return!1===t?k.removeAttr(e,n):e.setAttribute(n,n),n}},k.each(k.expr.match.bool.source.match(/\w+/g),function(e,t){var a=gt[t]||k.find.attr;gt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=gt[o],gt[o]=r,r=null!=a(e,t,n)?o:null,gt[o]=i),r}});var vt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;function mt(e){return(e.match(R)||[]).join(" ")}function xt(e){return e.getAttribute&&e.getAttribute("class")||""}function bt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(R)||[]}k.fn.extend({prop:function(e,t){return _(this,k.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[k.propFix[e]||e]})}}),k.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&k.isXMLDoc(e)||(t=k.propFix[t]||t,i=k.propHooks[t]),void 
0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=k.find.attr(e,"tabindex");return t?parseInt(t,10):vt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(k.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),k.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){k.propFix[this.toLowerCase()]=this}),k.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).addClass(t.call(this,e,xt(this)))});if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).removeClass(t.call(this,e,xt(this)))});if(!arguments.length)return this.attr("class","");if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){k(this).toggleClass(i.call(this,e,xt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=k(this),r=bt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=xt(this))&&Q.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Q.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+mt(xt(n))+" ").indexOf(t))return!0;return!1}});var wt=/\r/g;k.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,k(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=k.map(t,function(e){return null==e?"":e+""})),(r=k.valHooks[this.type]||k.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 0!==r.set(this,t,"value")||(this.value=t))})):t?(r=k.valHooks[t.type]||k.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(wt,""):null==e?"":e:void 0}}),k.extend({valHooks:{option:{get:function(e){var t=k.find.attr(e,"value");return null!=t?t:mt(k.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=k(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=k.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<k.inArray(k.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),k.each(["radio","checkbox"],function(){k.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<k.inArray(k(e).val(),t)}},y.checkOn||(k.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var 
Tt=/^(?:focusinfocus|focusoutblur)$/,Ct=function(e){e.stopPropagation()};k.extend(k.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!Tt.test(d+k.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[k.expando]?e:new k.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 0,e.target||(e.target=n),t=null==t?[e]:k.makeArray(t,[e]),c=k.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,Tt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Q.get(o,"events")||{})[e.type]&&Q.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&G(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!G(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),k.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,Ct),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,Ct),k.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=k.extend(new k.Event,n,{type:e,isSimulated:!0});k.event.trigger(r,null,t)}}),k.fn.extend({trigger:function(e,t){return this.each(function(){k.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return k.event.trigger(e,t,n,!0)}}),y.focusin||k.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){k.event.simulate(r,e.target,k.event.fix(e))};k.event.special[r]={setup:function(){var e=this.ownerDocument||this,t=Q.access(e,r);t||e.addEventListener(n,i,!0),Q.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this,t=Q.access(e,r)-1;t?Q.access(e,r,t):(e.removeEventListener(n,i,!0),Q.remove(e,r))}}});var Et=C.location,kt=Date.now(),St=/\?/;k.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||k.error("Invalid XML: "+e),t};var Nt=/\[\]$/,At=/\r?\n/g,Dt=/^(?:submit|button|image|reset|file)$/i,jt=/^(?:input|select|textarea|keygen)/i;function qt(n,e,r,i){var t;if(Array.isArray(e))k.each(e,function(e,t){r||Nt.test(n)?i(n,t):qt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)qt(n+"["+t+"]",e[t],r,i)}k.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!k.isPlainObject(e))k.each(e,function(){i(this.name,this.value)});else for(n in e)qt(n,e[n],t,i);return r.join("&")},k.fn.extend({serialize:function(){return k.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=k.prop(this,"elements");return e?k.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!k(this).is(":disabled")&&jt.test(this.nodeName)&&!Dt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=k(this).val();return 
null==n?null:Array.isArray(n)?k.map(n,function(e){return{name:t.name,value:e.replace(At,"\r\n")}}):{name:t.name,value:n.replace(At,"\r\n")}}).get()}});var Lt=/%20/g,Ht=/#.*$/,Ot=/([?&])_=[^&]*/,Pt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Rt=/^(?:GET|HEAD)$/,Mt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Ft=E.createElement("a");function Bt(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(R)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function _t(t,i,o,a){var s={},u=t===Wt;function l(e){var r;return s[e]=!0,k.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function zt(e,t){var n,r,i=k.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&k.extend(!0,e,r),e}Ft.href=Et.href,k.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Et.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Et.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":k.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,k.ajaxSettings),t):zt(k.ajaxSettings,e)},ajaxPrefilter:Bt(It),ajaxTransport:Bt(Wt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=k.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?k(y):k.event,x=k.Deferred(),b=k.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Pt.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Et.href)+"").replace(Mt,Et.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(R)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Ft.protocol+"//"+Ft.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=k.param(v.data,v.traditional)),_t(It,v,t,T),h)return T;for(i in(g=k.event&&v.global)&&0==k.active++&&k.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Rt.test(v.type),f=v.url.replace(Ht,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(Lt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(St.test(f)?"&":"?")+v.data,delete 
v.data),!1===v.cache&&(f=f.replace(Ot,"$1"),o=(St.test(f)?"&":"?")+"_="+kt+++o),v.url=f+o),v.ifModified&&(k.lastModified[f]&&T.setRequestHeader("If-Modified-Since",k.lastModified[f]),k.etag[f]&&T.setRequestHeader("If-None-Match",k.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+$t+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=_t(Wt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(k.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(k.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--k.active||k.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return k.get(e,t,n,"json")},getScript:function(e,t){return k.get(e,void 0,t,"script")}}),k.each(["get","post"],function(e,i){k[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),k.ajax(k.extend({url:e,type:i,dataType:r,data:t,success:n},k.isPlainObject(e)&&e))}}),k._evalUrl=function(e,t){return k.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){k.globalEval(e,t)}})},k.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=k(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){k(this).wrapInner(n.call(this,e))}):this.each(function(){var e=k(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return 
this.each(function(e){k(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){k(this).replaceWith(this.childNodes)}),this}}),k.expr.pseudos.hidden=function(e){return!k.expr.pseudos.visible(e)},k.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},k.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var Ut={0:200,1223:204},Xt=k.ajaxSettings.xhr();y.cors=!!Xt&&"withCredentials"in Xt,y.ajax=Xt=!!Xt,k.ajaxTransport(function(i){var o,a;if(y.cors||Xt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(Ut[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),k.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),k.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return k.globalEval(e),e}}}),k.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),k.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=k("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof 
t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=mt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&k.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?k("<div>").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup 
contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}}),k.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),k.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),k.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||k.guid++,i},k.holdReady=function(e){e?k.readyWait++:k.ready(!0)},k.isArray=Array.isArray,k.parseJSON=JSON.parse,k.nodeName=A,k.isFunction=m,k.isWindow=x,k.camelCase=V,k.type=w,k.now=Date.now,k.isNumeric=function(e){var t=k.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return k});var Qt=C.jQuery,Jt=C.$;return k.noConflict=function(e){return C.$===k&&(C.$=Jt),e&&C.jQuery===k&&(C.jQuery=Qt),k},e||(C.jQuery=C.$=k),k}); diff --git a/synapse/static/client/register/js/register.js b/synapse/static/client/register/js/register.js deleted file mode 100644 index 3547f7be4f..0000000000 --- a/synapse/static/client/register/js/register.js +++ /dev/null @@ -1,117 +0,0 @@ -window.matrixRegistration = { - endpoint: location.origin + "/_matrix/client/api/v1/register" -}; - -var setupCaptcha = function() { - if (!window.matrixRegistrationConfig) { - return; - } - $.get(matrixRegistration.endpoint, function(response) { - var serverExpectsCaptcha = false; - for (var i=0; i<response.flows.length; i++) { - var flow = response.flows[i]; - if ("m.login.recaptcha" === flow.type) { - serverExpectsCaptcha = true; - break; - } - } - if (!serverExpectsCaptcha) { - console.log("This server does not require a captcha."); - return; - } - console.log("Setting up ReCaptcha for "+matrixRegistration.endpoint); - var public_key = window.matrixRegistrationConfig.recaptcha_public_key; - if (public_key === undefined) { - console.error("No public key defined for captcha!"); - setFeedbackString("Misconfigured captcha for server. 
Contact server admin."); - return; - } - Recaptcha.create(public_key, - "regcaptcha", - { - theme: "red", - callback: Recaptcha.focus_response_field - }); - window.matrixRegistration.isUsingRecaptcha = true; - }).fail(errorFunc); - -}; - -var submitCaptcha = function(user, pwd) { - var challengeToken = Recaptcha.get_challenge(); - var captchaEntry = Recaptcha.get_response(); - var data = { - type: "m.login.recaptcha", - challenge: challengeToken, - response: captchaEntry - }; - console.log("Submitting captcha"); - $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) { - console.log("Success -> "+JSON.stringify(response)); - submitPassword(user, pwd, response.session); - }).fail(function(err) { - Recaptcha.reload(); - errorFunc(err); - }); -}; - -var submitPassword = function(user, pwd, session) { - console.log("Registering..."); - var data = { - type: "m.login.password", - user: user, - password: pwd, - session: session - }; - $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) { - matrixRegistration.onRegistered( - response.home_server, response.user_id, response.access_token - ); - }).fail(errorFunc); -}; - -var errorFunc = function(err) { - if (err.responseJSON && err.responseJSON.error) { - setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); - } - else { - setFeedbackString("Request failed: " + err.status); - } -}; - -var setFeedbackString = function(text) { - $("#feedback").text(text); -}; - -matrixRegistration.onLoad = function() { - setupCaptcha(); -}; - -matrixRegistration.signUp = function() { - var user = $("#desired_user_id").val(); - if (user.length == 0) { - setFeedbackString("Must specify a username."); - return; - } - var pwd1 = $("#pwd1").val(); - var pwd2 = $("#pwd2").val(); - if (pwd1.length < 6) { - setFeedbackString("Password: min. 6 characters."); - return; - } - if (pwd1 != pwd2) { - setFeedbackString("Passwords do not match."); - return; - } - if (window.matrixRegistration.isUsingRecaptcha) { - submitCaptcha(user, pwd1); - } - else { - submitPassword(user, pwd1); - } -}; - -matrixRegistration.onRegistered = function(hs_url, user_id, access_token) { - // clobber this function - console.warn("onRegistered - This function should be replaced to proceed."); -}; diff --git a/synapse/static/client/register/register_config.sample.js b/synapse/static/client/register/register_config.sample.js deleted file mode 100644 index c7ea180dee..0000000000 --- a/synapse/static/client/register/register_config.sample.js +++ /dev/null @@ -1,3 +0,0 @@ -window.matrixRegistrationConfig = { - recaptcha_public_key: "YOUR_PUBLIC_KEY" -}; diff --git a/synapse/static/client/register/style.css b/synapse/static/client/register/style.css deleted file mode 100644 index 8a39b5d0f5..0000000000 --- a/synapse/static/client/register/style.css +++ /dev/null @@ -1,64 +0,0 @@ -html { - height: 100%; -} - -body { - height: 100%; - font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif; - font-size: 12pt; - margin: 0px; -} - -h1 { - font-size: 20pt; -} - -a:link { color: #666; } -a:visited { color: #666; } -a:hover { color: #000; } -a:active { color: #000; } - -input { - width: 100% -} - -textarea, input { - font-family: inherit; - font-size: inherit; -} - -.smallPrint { - color: #888; - font-size: 9pt ! important; - font-style: italic ! 
important; -} - -#recaptcha_area { - margin: auto -} - -.g-recaptcha div { - margin: auto; -} - -#registrationForm { - text-align: left; - padding: 5px; - margin-bottom: 40px; - display: inline-block; - - -webkit-border-radius: 10px; - -moz-border-radius: 10px; - border-radius: 10px; - - -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - - background-color: #f8f8f8; - border: 1px #ccc solid; -} - -.error { - color: red; -} diff --git a/synapse/storage/controllers/__init__.py b/synapse/storage/controllers/__init__.py index 45101cda7a..0ef8602631 100644 --- a/synapse/storage/controllers/__init__.py +++ b/synapse/storage/controllers/__init__.py @@ -19,6 +19,7 @@ from synapse.storage.controllers.persist_events import ( ) from synapse.storage.controllers.purge_events import PurgeEventsStorageController from synapse.storage.controllers.state import StateStorageController +from synapse.storage.controllers.stats import StatsController from synapse.storage.databases import Databases from synapse.storage.databases.main import DataStore @@ -40,6 +41,7 @@ class StorageControllers: self.purge_events = PurgeEventsStorageController(hs, stores) self.state = StateStorageController(hs, stores) + self.stats = StatsController(hs, stores) self.persistence = None if stores.persist_events: diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 9d7a8a792f..7089b0a1d8 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -16,7 +16,6 @@ from typing import ( TYPE_CHECKING, AbstractSet, Any, - Awaitable, Callable, Collection, Dict, @@ -67,6 +66,8 @@ class StateStorageController: """ self._partial_state_room_tracker.notify_un_partial_stated(room_id) + @trace + @tag_args async def get_state_group_delta( self, state_group: int ) -> Tuple[Optional[int], Optional[StateMap[str]]]: @@ -84,6 +85,8 @@ class StateStorageController: state_group_delta = await self.stores.state.get_state_group_delta(state_group) return state_group_delta.prev_group, state_group_delta.delta_ids + @trace + @tag_args async def get_state_groups_ids( self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True ) -> Dict[int, MutableStateMap[str]]: @@ -114,6 +117,8 @@ class StateStorageController: return group_to_state + @trace + @tag_args async def get_state_ids_for_group( self, state_group: int, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: @@ -130,6 +135,8 @@ class StateStorageController: return group_to_state[state_group] + @trace + @tag_args async def get_state_groups( self, room_id: str, event_ids: Collection[str] ) -> Dict[int, List[EventBase]]: @@ -165,9 +172,11 @@ class StateStorageController: for group, event_id_map in group_to_ids.items() } - def _get_state_groups_from_groups( + @trace + @tag_args + async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter - ) -> Awaitable[Dict[int, StateMap[str]]]: + ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups, filtering on types of state events. @@ -180,9 +189,12 @@ class StateStorageController: Dict of state group to state map. 
""" - return self.stores.state._get_state_groups_from_groups(groups, state_filter) + return await self.stores.state._get_state_groups_from_groups( + groups, state_filter + ) @trace + @tag_args async def get_state_for_events( self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: @@ -280,6 +292,8 @@ class StateStorageController: return {event: event_to_state[event] for event in event_ids} + @trace + @tag_args async def get_state_for_event( self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -303,6 +317,7 @@ class StateStorageController: return state_map[event_id] @trace + @tag_args async def get_state_ids_for_event( self, event_id: str, @@ -333,9 +348,11 @@ class StateStorageController: ) return state_map[event_id] - def get_state_for_groups( + @trace + @tag_args + async def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None - ) -> Awaitable[Dict[int, MutableStateMap[str]]]: + ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -347,7 +364,7 @@ class StateStorageController: Returns: Dict of state group to state map. """ - return self.stores.state._get_state_for_groups( + return await self.stores.state._get_state_for_groups( groups, state_filter or StateFilter.all() ) @@ -402,6 +419,8 @@ class StateStorageController: event_id, room_id, prev_group, delta_ids, current_state_ids ) + @trace + @tag_args @cancellable async def get_current_state_ids( self, @@ -442,6 +461,8 @@ class StateStorageController: room_id, on_invalidate=on_invalidate ) + @trace + @tag_args async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]: """Get canonical alias for room, if any @@ -466,6 +487,8 @@ class StateStorageController: return event.content.get("canonical_alias") + @trace + @tag_args async def get_current_state_deltas( self, prev_stream_id: int, max_stream_id: int ) -> Tuple[int, List[Dict[str, Any]]]: @@ -500,6 +523,7 @@ class StateStorageController: ) @trace + @tag_args async def get_current_state( self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -516,6 +540,8 @@ class StateStorageController: return state_map + @trace + @tag_args async def get_current_state_event( self, room_id: str, event_type: str, state_key: str ) -> Optional[EventBase]: @@ -527,6 +553,8 @@ class StateStorageController: ) return state_map.get(key) + @trace + @tag_args async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state. @@ -538,6 +566,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room(room_id) + @trace + @tag_args async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]: """Get current hosts in room based on current state. 
@@ -553,6 +583,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room_ordered(room_id) + @trace + @tag_args async def get_current_hosts_in_room_or_partial_state_approximation( self, room_id: str ) -> Collection[str]: @@ -582,6 +614,8 @@ class StateStorageController: return hosts + @trace + @tag_args async def get_users_in_room_with_profiles( self, room_id: str ) -> Mapping[str, ProfileInfo]: diff --git a/synapse/storage/controllers/stats.py b/synapse/storage/controllers/stats.py new file mode 100644 index 0000000000..2a03528fee --- /dev/null +++ b/synapse/storage/controllers/stats.py @@ -0,0 +1,112 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import TYPE_CHECKING, Collection, Counter, List, Tuple + +from synapse.api.errors import SynapseError +from synapse.storage.database import LoggingTransaction +from synapse.storage.databases import Databases +from synapse.storage.engines import PostgresEngine + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class StatsController: + """High level interface for getting statistics.""" + + def __init__(self, hs: "HomeServer", stores: Databases): + self.stores = stores + + async def get_room_db_size_estimate(self) -> List[Tuple[str, int]]: + """Get an estimate of the largest rooms and how much database space they + use, in bytes. + + Only works against PostgreSQL. + + Note: this uses the postgres statistics so is a very rough estimate. + """ + + # Note: We look at both tables on the main and state databases. + if not isinstance(self.stores.main.database_engine, PostgresEngine): + raise SynapseError(400, "Endpoint requires using PostgreSQL") + + if not isinstance(self.stores.state.database_engine, PostgresEngine): + raise SynapseError(400, "Endpoint requires using PostgreSQL") + + # For each "large" table, we go through and get the largest rooms + # and an estimate of how much space they take. We can then sum the + # results and return the top 10. + # + # This isn't the most accurate, but given all of these are estimates + # anyway its good enough. + room_estimates: Counter[str] = Counter() + + # Return size of the table on disk, including indexes and TOAST. + table_sql = """ + SELECT pg_total_relation_size(?) + """ + + # Get an estimate for the largest rooms and their frequency. + # + # Note: the cast here is a hack to cast from `anyarray` to an actual + # type. This ensures that psycopg2 passes us a back a a Python list. + column_sql = """ + SELECT + most_common_vals::TEXT::TEXT[], most_common_freqs::TEXT::NUMERIC[] + FROM pg_stats + WHERE tablename = ? 
and attname = 'room_id' + """ + + def get_room_db_size_estimate_txn( + txn: LoggingTransaction, + tables: Collection[str], + ) -> None: + for table in tables: + txn.execute(table_sql, (table,)) + row = txn.fetchone() + assert row is not None + (table_size,) = row + + txn.execute(column_sql, (table,)) + row = txn.fetchone() + assert row is not None + vals, freqs = row + + for room_id, freq in zip(vals, freqs): + room_estimates[room_id] += int(freq * table_size) + + await self.stores.main.db_pool.runInteraction( + "get_room_db_size_estimate_main", + get_room_db_size_estimate_txn, + ( + "event_json", + "events", + "event_search", + "event_edges", + "event_push_actions", + "stream_ordering_to_exterm", + ), + ) + + await self.stores.state.db_pool.runInteraction( + "get_room_db_size_estimate_state", + get_room_db_size_estimate_txn, + ("state_groups_state",), + ) + + return room_estimates.most_common(10) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 226ccc1671..bdaa508dbe 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -58,7 +58,7 @@ from synapse.metrics import register_threadpool from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Connection, Cursor, SQLQueryParameters from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter @@ -371,33 +371,56 @@ class LoggingTransaction: if isinstance(self.database_engine, PostgresEngine): from psycopg2.extras import execute_batch + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? + # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_batch + # suggests each arg in args should be a sequence or mapping self._do_execute( lambda the_sql: execute_batch(self.txn, the_sql, args), sql ) else: + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? + # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#sqlite3.Cursor.executemany + # suggests that the outer collection may be iterable, but + # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#how-to-use-placeholders-to-bind-values-in-sql-queries + # suggests that the inner collection should be a sequence or dict. self.executemany(sql, args) def execute_values( - self, sql: str, values: Iterable[Iterable[Any]], fetch: bool = True + self, + sql: str, + values: Iterable[Iterable[Any]], + template: Optional[str] = None, + fetch: bool = True, ) -> List[Tuple]: """Corresponds to psycopg2.extras.execute_values. Only available when using postgres. The `fetch` parameter must be set to False if the query does not return rows (e.g. INSERTs). + + The `template` is the snippet to merge to every item in argslist to + compose the query. """ assert isinstance(self.database_engine, PostgresEngine) from psycopg2.extras import execute_values return self._do_execute( - lambda the_sql: execute_values(self.txn, the_sql, values, fetch=fetch), + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? 
+ # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] + lambda the_sql: execute_values( + self.txn, the_sql, values, template=template, fetch=fetch + ), sql, ) - def execute(self, sql: str, *args: Any) -> None: - self._do_execute(self.txn.execute, sql, *args) + def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None: + self._do_execute(self.txn.execute, sql, parameters) def executemany(self, sql: str, *args: Any) -> None: + # TODO: we should add a type for *args here. Looking at Cursor.executemany + # and DBAPI2 it ought to be Sequence[_Parameter], but we pass in + # Iterable[Iterable[Any]] in execute_batch and execute_values above, which mypy + # complains about. self._do_execute(self.txn.executemany, sql, *args) def executescript(self, sql: str) -> None: @@ -542,9 +565,8 @@ class DatabasePool: # A set of tables that are not safe to use native upserts in. self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys()) - # We add the user_directory_search table to the blacklist on SQLite - # because the existing search table does not have an index, making it - # unsafe to use native upserts. + # The user_directory_search table is unsafe to use native upserts + # on SQLite because the existing search table does not have an index. if isinstance(self.engine, Sqlite3Engine): self._unsafe_to_upsert_tables.add("user_directory_search") diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index ce3d1d4e94..7aa24ccf21 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -95,7 +95,7 @@ class Databases(Generic[DataStoreT]): # If we're on a process that can persist events also # instantiate a `PersistEventsStore` if hs.get_instance_name() in hs.config.worker.writers.events: - persist_events = PersistEventsStore(hs, database, main, db_conn) + persist_events = PersistEventsStore(hs, database, main, db_conn) # type: ignore[arg-type] if "state" in database_config.databases: logger.info( @@ -133,6 +133,6 @@ class Databases(Generic[DataStoreT]): # We use local variables here to ensure that the databases do not have # optional types. 
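A note on the `template` argument threaded through `execute_values` above: psycopg2 expands a single `%s` placeholder in the statement into pages of VALUES tuples, and `template` controls how each tuple is rendered, which lets callers mix bound parameters with SQL expressions. A hedged usage sketch (the DSN and table here are hypothetical, not from this changeset):

import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=synapse")  # hypothetical DSN
cur = conn.cursor()

rows = [("@alice:example.com", 1), ("@bob:example.com", 2)]
# Each tuple is rendered via the template, so the third column can be a
# SQL expression (now()) rather than a bound parameter.
execute_values(
    cur,
    "INSERT INTO user_counts (user_id, count, updated_ts) VALUES %s",
    rows,
    template="(%s, %s, now())",
    fetch=False,
)
conn.commit()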
- self.main = main + self.main = main # type: ignore[assignment] self.state = state self.persist_events = persist_events diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index dc3948c170..0032a92f49 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -43,6 +43,7 @@ from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore +from .experimental_features import ExperimentalFeaturesStore from .filtering import FilteringWorkerStore from .keys import KeyStore from .lock import LockStore @@ -82,6 +83,7 @@ logger = logging.getLogger(__name__) class DataStore( EventsBackgroundUpdatesStore, + ExperimentalFeaturesStore, DeviceStore, RoomMemberStore, RoomStore, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index a9843f6e17..8f7bdbc61a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -85,13 +85,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) writers=hs.config.worker.writers.account_data, ) else: + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). self._account_data_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 096dec7f87..46fa0a73f9 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -205,13 +205,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) elif row.type == EventsStreamCurrentStateRow.TypeId: assert isinstance(data, EventsStreamCurrentStateRow) - self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) + self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] if data.type == EventTypes.Member: - self.get_rooms_for_user_with_stream_ordering.invalidate( + self.get_rooms_for_user_with_stream_ordering.invalidate( # type: ignore[attr-defined] (data.state_key,) ) - self.get_rooms_for_user.invalidate((data.state_key,)) + self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined] else: raise Exception("Unknown events stream row type %s" % (row.type,)) @@ -229,7 +229,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): # This invalidates any local in-memory cached event objects, the original # process triggering the invalidation is responsible for clearing any external # cached objects. 
- self._invalidate_local_get_event_cache(event_id) + self._invalidate_local_get_event_cache(event_id) # type: ignore[attr-defined] self._attempt_to_invalidate_cache("have_seen_event", (room_id, event_id)) self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) @@ -242,10 +242,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,)) if not backfilled: - self._events_stream_cache.entity_has_changed(room_id, stream_ordering) + self._events_stream_cache.entity_has_changed(room_id, stream_ordering) # type: ignore[attr-defined] if redacts: - self._invalidate_local_get_event_cache(redacts) + self._invalidate_local_get_event_cache(redacts) # type: ignore[attr-defined] # Caches which might leak edits must be invalidated for the event being # redacted. self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,)) @@ -254,7 +254,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_id_for_receipts", (redacts,)) if etype == EventTypes.Member: - self._membership_stream_cache.entity_has_changed(state_key, stream_ordering) + self._membership_stream_cache.entity_has_changed(state_key, stream_ordering) # type: ignore[attr-defined] self._attempt_to_invalidate_cache( "get_invited_rooms_for_local_user", (state_key,) ) @@ -274,11 +274,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): async def invalidate_cache_and_stream( self, cache_name: str, keys: Tuple[Any, ...] ) -> None: - """Invalidates the cache and adds it to the cache stream so slaves + """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. - This should only be used to invalidate caches where slaves won't - otherwise know from other replication streams that the cache should + This should only be used to invalidate caches where other workers won't + otherwise have known from other replication streams that the cache should be invalidated. """ cache_func = getattr(self, cache_name, None) @@ -297,11 +297,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): cache_func: CachedFunction, keys: Tuple[Any, ...], ) -> None: - """Invalidates the cache and adds it to the cache stream so slaves + """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. - This should only be used to invalidate caches where slaves won't - otherwise know from other replication streams that the cache should + This should only be used to invalidate caches where other workers won't + otherwise have known from other replication streams that the cache should be invalidated. """ txn.call_after(cache_func.invalidate, keys) @@ -310,7 +310,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): def _invalidate_all_cache_and_stream( self, txn: LoggingTransaction, cache_func: CachedFunction ) -> None: - """Invalidates the entire cache and adds it to the cache stream so slaves + """Invalidates the entire cache and adds it to the cache stream so other workers will know to invalidate their caches. """ @@ -378,6 +378,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) if isinstance(self.database_engine, PostgresEngine): + assert self._cache_id_gen is not None + # get_next() returns a context manager which is designed to wrap # the transaction. 
However, we want to only get an ID when we want # to use it, here, so we need to call __enter__ manually, and have diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 5503621ad6..a67fdb3c22 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -105,8 +105,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): is_writer=hs.config.worker.worker_app is None, ) - # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a - # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). device_list_max = self._device_list_id_gen.get_current_token() device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict( db_conn, diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 44aa181174..3cb4c90729 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -129,8 +129,6 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore): 409, "Room alias %s already exists" % room_alias.to_string() ) - -class DirectoryStore(DirectoryWorkerStore): async def delete_room_alias(self, room_alias: RoomAlias) -> Optional[str]: room_id = await self.db_pool.runInteraction( "delete_room_alias", self._delete_room_alias_txn, room_alias @@ -201,3 +199,7 @@ class DirectoryStore(DirectoryWorkerStore): await self.db_pool.runInteraction( "_update_aliases_for_room_txn", _update_aliases_for_room_txn ) + + +class DirectoryStore(DirectoryWorkerStore): + pass diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 9f8d2e4bea..d01f28cc80 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -13,17 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Iterable, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, cast from typing_extensions import Literal, TypedDict from synapse.api.errors import StoreError from synapse.logging.opentracing import log_kv, trace from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.types import JsonDict, JsonSerializable, StreamKeyType from synapse.util import json_encoder +if TYPE_CHECKING: + from synapse.server import HomeServer + class RoomKey(TypedDict): """`KeyBackupData` in the Matrix spec. 
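The directory.py hunks above are the recurring "push writes down to the worker store" refactor: `delete_room_alias` and `_update_aliases_for_room_txn` move into `DirectoryWorkerStore` so whichever worker is configured to handle the write can call them, leaving `DirectoryStore` as an empty subclass kept only for compatibility. The general shape, sketched with hypothetical names:

class ThingWorkerStore:
    """Stand-in for a Synapse *WorkerStore mixin (hypothetical)."""

    async def delete_thing(self, thing_id: str) -> None:
        # Write methods live on the worker store so that any worker
        # process entitled to write this data can run them.
        ...


class ThingStore(ThingWorkerStore):
    # Retained as an empty subclass purely so existing references to the
    # non-worker class keep resolving.
    pass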
@@ -37,7 +44,82 @@ class RoomKey(TypedDict): session_data: JsonSerializable -class EndToEndRoomKeyStore(SQLBaseStore): +class EndToEndRoomKeyBackgroundStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_update_handler( + "delete_e2e_backup_keys_for_deactivated_users", + self._delete_e2e_backup_keys_for_deactivated_users, + ) + + def _delete_keys_txn(self, txn: LoggingTransaction, user_id: str) -> None: + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys", + keyvalues={"user_id": user_id}, + ) + + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={"user_id": user_id}, + ) + + async def _delete_e2e_backup_keys_for_deactivated_users( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Retroactively purges e2e backup keys for users that have already been deactivated. + Gets run as a background update caused by a schema delta. + """ + + last_user: str = progress.get("last_user", "") + + def _delete_backup_keys_for_deactivated_users_txn( + txn: LoggingTransaction, + ) -> int: + sql = """ + SELECT name FROM users + WHERE deactivated = ? and name > ? + ORDER BY name ASC + LIMIT ? + """ + + txn.execute(sql, (1, last_user, batch_size)) + users = [row[0] for row in txn] + + for user in users: + self._delete_keys_txn(txn, user) + + if users: + self.db_pool.updates._background_update_progress_txn( + txn, + "delete_e2e_backup_keys_for_deactivated_users", + {"last_user": users[-1]}, + ) + + return len(users) + + number_deleted = await self.db_pool.runInteraction( + "_delete_backup_keys_for_deactivated_users", + _delete_backup_keys_for_deactivated_users_txn, + ) + + if number_deleted < batch_size: + await self.db_pool.updates._end_background_update( + "delete_e2e_backup_keys_for_deactivated_users" + ) + + return number_deleted + + +class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): """The store for end to end room key backups. See https://spec.matrix.org/v1.1/client-server-api/#server-side-key-backups @@ -550,3 +632,29 @@ class EndToEndRoomKeyStore(SQLBaseStore): await self.db_pool.runInteraction( "delete_e2e_room_keys_version", _delete_e2e_room_keys_version_txn ) + + async def bulk_delete_backup_keys_and_versions_for_user(self, user_id: str) -> None: + """ + Bulk deletes all backup room keys and versions for a given user. + + Args: + user_id: the user whose backup keys and versions we're deleting + """ + + def _delete_all_e2e_room_keys_and_versions_txn(txn: LoggingTransaction) -> None: + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys", + keyvalues={"user_id": user_id}, + ) + + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={"user_id": user_id}, + ) + + await self.db_pool.runInteraction( + "delete_all_e2e_room_keys_and_versions", + _delete_all_e2e_room_keys_and_versions_txn, + ) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index dc7768c50c..4bc391f213 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1027,8 +1027,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ...
async def claim_e2e_one_time_keys( - self, query_list: Iterable[Tuple[str, str, str]] - ) -> Tuple[Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str]]]: + self, query_list: Iterable[Tuple[str, str, str, int]] + ) -> Tuple[ + Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] + ]: """Take a list of one time keys out of the database. Args: @@ -1043,8 +1045,12 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @trace def _claim_e2e_one_time_key_simple( - txn: LoggingTransaction, user_id: str, device_id: str, algorithm: str - ) -> Optional[Tuple[str, str]]: + txn: LoggingTransaction, + user_id: str, + device_id: str, + algorithm: str, + count: int, + ) -> List[Tuple[str, str]]: """Claim OTK for device for DBs that don't support RETURNING. Returns: @@ -1055,36 +1061,41 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker sql = """ SELECT key_id, key_json FROM e2e_one_time_keys_json WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT 1 + LIMIT ? """ - txn.execute(sql, (user_id, device_id, algorithm)) - otk_row = txn.fetchone() - if otk_row is None: - return None - - key_id, key_json = otk_row + txn.execute(sql, (user_id, device_id, algorithm, count)) + otk_rows = list(txn) + if not otk_rows: + return [] - self.db_pool.simple_delete_one_txn( + self.db_pool.simple_delete_many_txn( txn, table="e2e_one_time_keys_json", + column="key_id", + values=[otk_row[0] for otk_row in otk_rows], keyvalues={ "user_id": user_id, "device_id": device_id, "algorithm": algorithm, - "key_id": key_id, }, ) self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) - return f"{algorithm}:{key_id}", key_json + return [ + (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows + ] @trace def _claim_e2e_one_time_key_returning( - txn: LoggingTransaction, user_id: str, device_id: str, algorithm: str - ) -> Optional[Tuple[str, str]]: + txn: LoggingTransaction, + user_id: str, + device_id: str, + algorithm: str, + count: int, + ) -> List[Tuple[str, str]]: """Claim OTK for device for DBs that support RETURNING. Returns: @@ -1099,28 +1110,30 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker AND key_id IN ( SELECT key_id FROM e2e_one_time_keys_json WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT 1 + LIMIT ? ) RETURNING key_id, key_json """ txn.execute( - sql, (user_id, device_id, algorithm, user_id, device_id, algorithm) + sql, + (user_id, device_id, algorithm, user_id, device_id, algorithm, count), ) - otk_row = txn.fetchone() - if otk_row is None: - return None + otk_rows = list(txn) + if not otk_rows: + return [] self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) - key_id, key_json = otk_row - return f"{algorithm}:{key_id}", key_json + return [ + (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows + ] results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} - missing: List[Tuple[str, str, str]] = [] - for user_id, device_id, algorithm in query_list: + missing: List[Tuple[str, str, str, int]] = [] + for user_id, device_id, algorithm, count in query_list: if self.database_engine.supports_returning: # If we support RETURNING clause we can use a single query that # allows us to use autocommit mode. 
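Why the two claim strategies above coexist: on engines with `DELETE ... RETURNING`, claiming up to `count` one-time keys is a single statement (and therefore safe to run in autocommit mode), while engines without it fall back to SELECT-then-DELETE inside a transaction. A standalone illustration of the RETURNING shape, assuming SQLite 3.35+ and a schema cut down to the relevant columns:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE e2e_one_time_keys_json (
        user_id TEXT, device_id TEXT, algorithm TEXT,
        key_id TEXT, key_json TEXT
    );
    INSERT INTO e2e_one_time_keys_json VALUES
        ('@u:s', 'DEV', 'signed_curve25519', 'AAAA', '{}'),
        ('@u:s', 'DEV', 'signed_curve25519', 'AAAB', '{}');
    """
)
# Claim up to `count` keys in one statement; each key can be handed out
# at most once because the DELETE removes it as it is returned.
count = 2
rows = conn.execute(
    """
    DELETE FROM e2e_one_time_keys_json
    WHERE user_id = ? AND device_id = ? AND algorithm = ?
      AND key_id IN (
        SELECT key_id FROM e2e_one_time_keys_json
        WHERE user_id = ? AND device_id = ? AND algorithm = ?
        LIMIT ?
      )
    RETURNING key_id, key_json
    """,
    ("@u:s", "DEV", "signed_curve25519") * 2 + (count,),
).fetchall()
print(rows)  # [('AAAA', '{}'), ('AAAB', '{}')]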
@@ -1130,37 +1143,42 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple db_autocommit = False - claim_row = await self.db_pool.runInteraction( + claim_rows = await self.db_pool.runInteraction( "claim_e2e_one_time_keys", _claim_e2e_one_time_key, user_id, device_id, algorithm, + count, db_autocommit=db_autocommit, ) - if claim_row: + if claim_rows: device_results = results.setdefault(user_id, {}).setdefault( device_id, {} ) - device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) - else: - missing.append((user_id, device_id, algorithm)) + for claim_row in claim_rows: + device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) + # Did we get enough OTKs? + count -= len(claim_rows) + if count: + missing.append((user_id, device_id, algorithm, count)) return results, missing async def claim_e2e_fallback_keys( - self, query_list: Iterable[Tuple[str, str, str]] + self, query_list: Iterable[Tuple[str, str, str, bool]] ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: """Take a list of fallback keys out of the database. Args: - query_list: An iterable of tuples of (user ID, device ID, algorithm). + query_list: An iterable of tuples of + (user ID, device ID, algorithm, whether the key should be marked as used). Returns: A map of user ID -> a map device ID -> a map of key ID -> JSON. """ results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} - for user_id, device_id, algorithm in query_list: + for user_id, device_id, algorithm, mark_as_used in query_list: row = await self.db_pool.simple_select_one( table="e2e_fallback_keys_json", keyvalues={ @@ -1180,7 +1198,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker used = row["used"] # Mark fallback key as used if not already. - if not used: + if not used and mark_as_used: await self.db_pool.simple_update_one( table="e2e_fallback_keys_json", keyvalues={ diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index a19ba88bf8..ac19de183c 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -114,6 +114,10 @@ class _NoChainCoverIndex(Exception): class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBaseStore): + # TODO: this attribute comes from EventPushActionWorkerStore. Should we inherit from + # that store so that mypy can deduce this for itself? + stream_ordering_month_ago: Optional[int] + def __init__( self, database: DatabasePool, @@ -1171,6 +1175,38 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas return int(min_depth) if min_depth is not None else None + async def have_room_forward_extremities_changed_since( + self, + room_id: str, + stream_ordering: int, + ) -> bool: + """Check if the forward extremities in a room have changed since the + given stream ordering + + Throws a StoreError if we have since purged the index for + stream_orderings from that point. + """ + assert self.stream_ordering_month_ago is not None + if stream_ordering <= self.stream_ordering_month_ago: + raise StoreError(400, f"stream_ordering too old {stream_ordering}") + + sql = """ + SELECT 1 FROM stream_ordering_to_exterm + WHERE stream_ordering > ? AND room_id = ? 
+ LIMIT 1 + """ + + def have_room_forward_extremities_changed_since_txn( + txn: LoggingTransaction, + ) -> bool: + txn.execute(sql, (stream_ordering, room_id)) + return txn.fetchone() is not None + + return await self.db_pool.runInteraction( + "have_room_forward_extremities_changed_since", + have_room_forward_extremities_changed_since_txn, + ) + @cancellable async def get_forward_extremities_for_room_at_stream_ordering( self, room_id: str, stream_ordering: int @@ -1199,7 +1235,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # provided the last_change is recent enough, we now clamp the requested # stream_ordering to it. - if last_change > self.stream_ordering_month_ago: # type: ignore[attr-defined] + assert self.stream_ordering_month_ago is not None + if last_change > self.stream_ordering_month_ago: stream_ordering = min(last_change, stream_ordering) return await self._get_forward_extremeties_for_room(room_id, stream_ordering) @@ -1214,8 +1251,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas Throws a StoreError if we have since purged the index for stream_orderings from that point. """ - - if stream_ordering <= self.stream_ordering_month_ago: # type: ignore[attr-defined] + assert self.stream_ordering_month_ago is not None + if stream_ordering <= self.stream_ordering_month_ago: raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,)) sql = """ @@ -1232,10 +1269,17 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas txn.execute(sql, (stream_ordering, room_id)) return [event_id for event_id, in txn] - return await self.db_pool.runInteraction( + event_ids = await self.db_pool.runInteraction( "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn ) + # If we didn't find any IDs, then we must have cleared out the + # associated `stream_ordering_to_exterm`. + if not event_ids: + raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,)) + + return event_ids + def _get_connected_batch_event_backfill_results_txn( self, txn: LoggingTransaction, insertion_event_id: str, limit: int ) -> List[BackfillQueueNavigationItem]: @@ -1664,20 +1708,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas @wrap_as_background_process("delete_old_forward_extrem_cache") async def _delete_old_forward_extrem_cache(self) -> None: def _delete_old_forward_extrem_cache_txn(txn: LoggingTransaction) -> None: - # Delete entries older than a month, while making sure we don't delete - # the only entries for a room. sql = """ DELETE FROM stream_ordering_to_exterm - WHERE - room_id IN ( - SELECT room_id - FROM stream_ordering_to_exterm - WHERE stream_ordering > ? - ) AND stream_ordering < ? + WHERE stream_ordering < ? """ - txn.execute( - sql, (self.stream_ordering_month_ago, self.stream_ordering_month_ago) # type: ignore[attr-defined] - ) + txn.execute(sql, (self.stream_ordering_month_ago,)) await self.db_pool.runInteraction( "_delete_old_forward_extrem_cache", diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index eeccf5db24..6fdb1e292e 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1836,6 +1836,15 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # deletes. 
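The `_delete_old_forward_extrem_cache` simplification above pairs with the new `StoreError` in `_get_forward_extremeties_for_room`: once the DELETE no longer preserves the newest rows of idle rooms, a room that has been quiet for a month can legitimately have no `stream_ordering_to_exterm` rows at all, so an empty result now means "stream_ordering too old" rather than silently returning `[]`. A toy demonstration of the new failure mode:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE stream_ordering_to_exterm (
        room_id TEXT, stream_ordering INTEGER, event_id TEXT
    );
    INSERT INTO stream_ordering_to_exterm VALUES
        ('!busy:s', 150, '$new'),
        ('!idle:s', 50, '$old');
    """
)
cutoff = 100  # hypothetical "a month ago" stream ordering
conn.execute(
    "DELETE FROM stream_ordering_to_exterm WHERE stream_ordering < ?", (cutoff,)
)
# The idle room has lost its only row, so a lookup finds nothing -- which
# is exactly the case the new "stream_ordering too old" StoreError covers.
rows = conn.execute(
    "SELECT event_id FROM stream_ordering_to_exterm WHERE room_id = ?",
    ("!idle:s",),
).fetchall()
print(rows)  # []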
batch_size = self._rotate_count + if isinstance(self.database_engine, PostgresEngine): + # Temporarily disable sequential scans in this transaction. We + # need to do this as the postgres statistics don't take into + # account the `highlight = 0` part when estimating the + # distribution of `stream_ordering`. I.e. since we keep old + # highlight rows the query planner thinks there are way more old + # rows to delete than there actually are. + txn.execute("SET LOCAL enable_seqscan=off") + txn.execute( """ SELECT stream_ordering FROM event_push_actions diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 9c1e506da6..e2e6eb479f 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -127,6 +127,8 @@ class PersistEventsStore: self._backfill_id_gen: AbstractStreamIdGenerator = self.store._backfill_id_gen self._stream_id_gen: AbstractStreamIdGenerator = self.store._stream_id_gen + self._msc3970_enabled = hs.config.experimental.msc3970_enabled + @trace async def _persist_events_and_state_updates( self, @@ -977,23 +979,43 @@ class PersistEventsStore: ) -> None: """Persist the mapping from transaction IDs to event IDs (if defined).""" - to_insert = [] + inserted_ts = self._clock.time_msec() + to_insert_token_id: List[Tuple[str, str, str, int, str, int]] = [] + to_insert_device_id: List[Tuple[str, str, str, str, str, int]] = [] for event, _ in events_and_contexts: - token_id = getattr(event.internal_metadata, "token_id", None) txn_id = getattr(event.internal_metadata, "txn_id", None) - if token_id and txn_id: - to_insert.append( - ( - event.event_id, - event.room_id, - event.sender, - token_id, - txn_id, - self._clock.time_msec(), + token_id = getattr(event.internal_metadata, "token_id", None) + device_id = getattr(event.internal_metadata, "device_id", None) + + if txn_id is not None: + if token_id is not None: + to_insert_token_id.append( + ( + event.event_id, + event.room_id, + event.sender, + token_id, + txn_id, + inserted_ts, + ) + ) + + if device_id is not None: + to_insert_device_id.append( + ( + event.event_id, + event.room_id, + event.sender, + device_id, + txn_id, + inserted_ts, + ) ) - ) - if to_insert: + # Pre-MSC3970, we rely on the access_token_id to scope the txn_id for events. + # Since this is an experimental flag, we still store the mapping even if the + # flag is disabled. + if to_insert_token_id: self.db_pool.simple_insert_many_txn( txn, table="event_txn_id", @@ -1005,7 +1027,25 @@ class PersistEventsStore: "txn_id", "inserted_ts", ), - values=to_insert, + values=to_insert_token_id, + ) + + # With MSC3970, we rely on the device_id instead to scope the txn_id for events. + # We're only inserting if MSC3970 is *enabled*, because else the pre-MSC3970 + # behaviour would allow for a UNIQUE constraint violation on this table + if to_insert_device_id and self._msc3970_enabled: + self.db_pool.simple_insert_many_txn( + txn, + table="event_txn_id_device_id", + keys=( + "event_id", + "room_id", + "user_id", + "device_id", + "txn_id", + "inserted_ts", + ), + values=to_insert_device_id, ) async def update_current_state( @@ -1127,11 +1167,15 @@ class PersistEventsStore: # been inserted into room_memberships. 
txn.execute_batch( """INSERT INTO current_state_events - (room_id, type, state_key, event_id, membership) - VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + (room_id, type, state_key, event_id, membership, event_stream_ordering) + VALUES ( + ?, ?, ?, ?, + (SELECT membership FROM room_memberships WHERE event_id = ?), + (SELECT stream_ordering FROM events WHERE event_id = ?) + ) """, [ - (room_id, key[0], key[1], ev_id, ev_id) + (room_id, key[0], key[1], ev_id, ev_id, ev_id) for key, ev_id in to_insert.items() ], ) @@ -1158,11 +1202,15 @@ class PersistEventsStore: if to_insert: txn.execute_batch( """INSERT INTO local_current_membership - (room_id, user_id, event_id, membership) - VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + (room_id, user_id, event_id, membership, event_stream_ordering) + VALUES ( + ?, ?, ?, + (SELECT membership FROM room_memberships WHERE event_id = ?), + (SELECT stream_ordering FROM events WHERE event_id = ?) + ) """, [ - (room_id, key[1], ev_id, ev_id) + (room_id, key[1], ev_id, ev_id, ev_id) for key, ev_id in to_insert.items() if key[0] == EventTypes.Member and self.is_mine_id(key[1]) ], @@ -1768,6 +1816,7 @@ class PersistEventsStore: table="room_memberships", keys=( "event_id", + "event_stream_ordering", "user_id", "sender", "room_id", @@ -1778,6 +1827,7 @@ class PersistEventsStore: values=[ ( event.event_id, + event.internal_metadata.stream_ordering, event.state_key, event.user_id, event.room_id, @@ -1810,6 +1860,7 @@ class PersistEventsStore: keyvalues={"room_id": event.room_id, "user_id": event.state_key}, values={ "event_id": event.event_id, + "event_stream_ordering": event.internal_metadata.stream_ordering, "membership": event.membership, }, ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0cf46626d2..a39bc90974 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -213,13 +213,10 @@ class EventsWorkerStore(SQLBaseStore): writers=hs.config.worker.writers.events, ) else: + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). 
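The reshaped INSERTs above (for `current_state_events` and `local_current_membership`) use correlated scalar subqueries so that `membership` and the new `event_stream_ordering` column are both resolved at insert time from the bound `event_id`, keeping the batched statement flat. A self-contained sketch of the same shape:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE events (event_id TEXT PRIMARY KEY, stream_ordering INTEGER);
    CREATE TABLE room_memberships (event_id TEXT, membership TEXT);
    CREATE TABLE current_state_events (
        room_id TEXT, type TEXT, state_key TEXT,
        event_id TEXT, membership TEXT, event_stream_ordering INTEGER
    );
    INSERT INTO events VALUES ('$ev1', 42);
    INSERT INTO room_memberships VALUES ('$ev1', 'join');
    """
)
# Scalar subqueries pull membership and stream ordering across at insert
# time, so the caller only binds the event_id once per derived column.
conn.execute(
    """
    INSERT INTO current_state_events
        (room_id, type, state_key, event_id, membership, event_stream_ordering)
    VALUES (
        ?, ?, ?, ?,
        (SELECT membership FROM room_memberships WHERE event_id = ?),
        (SELECT stream_ordering FROM events WHERE event_id = ?)
    )
    """,
    ("!r:s", "m.room.member", "@u:s", "$ev1", "$ev1", "$ev1"),
)
print(conn.execute(
    "SELECT membership, event_stream_ordering FROM current_state_events"
).fetchone())  # ('join', 42)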
self._stream_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), @@ -1976,12 +1973,6 @@ class EventsWorkerStore(SQLBaseStore): return rows, to_token, True - async def is_event_after(self, event_id1: str, event_id2: str) -> bool: - """Returns True if event_id1 is after event_id2 in the stream""" - to_1, so_1 = await self.get_event_ordering(event_id1) - to_2, so_2 = await self.get_event_ordering(event_id2) - return (to_1, so_1) > (to_2, so_2) - @cached(max_entries=5000) async def get_event_ordering(self, event_id: str) -> Tuple[int, int]: res = await self.db_pool.simple_select_one( @@ -2022,7 +2013,7 @@ class EventsWorkerStore(SQLBaseStore): desc="get_next_event_to_expire", func=get_next_event_to_expire_txn ) - async def get_event_id_from_transaction_id( + async def get_event_id_from_transaction_id_and_token_id( self, room_id: str, user_id: str, token_id: int, txn_id: str ) -> Optional[str]: """Look up if we have already persisted an event for the transaction ID, @@ -2038,7 +2029,26 @@ class EventsWorkerStore(SQLBaseStore): }, retcol="event_id", allow_none=True, - desc="get_event_id_from_transaction_id", + desc="get_event_id_from_transaction_id_and_token_id", + ) + + async def get_event_id_from_transaction_id_and_device_id( + self, room_id: str, user_id: str, device_id: str, txn_id: str + ) -> Optional[str]: + """Look up if we have already persisted an event for the transaction ID, + returning the event ID if so. + """ + return await self.db_pool.simple_select_one_onecol( + table="event_txn_id_device_id", + keyvalues={ + "room_id": room_id, + "user_id": user_id, + "device_id": device_id, + "txn_id": txn_id, + }, + retcol="event_id", + allow_none=True, + desc="get_event_id_from_transaction_id_and_device_id", ) async def get_already_persisted_events( @@ -2068,7 +2078,7 @@ class EventsWorkerStore(SQLBaseStore): # Check if this is a duplicate of an event we've already # persisted. - existing = await self.get_event_id_from_transaction_id( + existing = await self.get_event_id_from_transaction_id_and_token_id( event.room_id, event.sender, token_id, txn_id ) if existing: @@ -2084,11 +2094,17 @@ class EventsWorkerStore(SQLBaseStore): """Cleans out transaction id mappings older than 24hrs.""" def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None: + one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 sql = """ DELETE FROM event_txn_id WHERE inserted_ts < ? """ - one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 + txn.execute(sql, (one_day_ago,)) + + sql = """ + DELETE FROM event_txn_id_device_id + WHERE inserted_ts < ? + """ txn.execute(sql, (one_day_ago,)) return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py new file mode 100644 index 0000000000..cf3226ae5a --- /dev/null +++ b/synapse/storage/databases/main/experimental_features.py @@ -0,0 +1,75 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Dict + +from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.databases.main import CacheInvalidationWorkerStore +from synapse.types import StrCollection +from synapse.util.caches.descriptors import cached + +if TYPE_CHECKING: + from synapse.rest.admin.experimental_features import ExperimentalFeature + from synapse.server import HomeServer + + +class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ) -> None: + super().__init__(database, db_conn, hs) + + @cached() + async def list_enabled_features(self, user_id: str) -> StrCollection: + """ + Checks to see what features are enabled for a given user + Args: + user: + the user to be queried on + Returns: + the features currently enabled for the user + """ + enabled = await self.db_pool.simple_select_list( + "per_user_experimental_features", + {"user_id": user_id, "enabled": True}, + ["feature"], + ) + + return [feature["feature"] for feature in enabled] + + async def set_features_for_user( + self, + user: str, + features: Dict["ExperimentalFeature", bool], + ) -> None: + """ + Enables or disables features for a given user + Args: + user: + the user for whom to enable/disable the features + features: + pairs of features and True/False for whether the feature should be enabled + """ + for feature, enabled in features.items(): + await self.db_pool.simple_upsert( + table="per_user_experimental_features", + keyvalues={"feature": feature, "user_id": user}, + values={"enabled": enabled}, + insertion_values={"user_id": user, "feature": feature}, + ) + + await self.invalidate_cache_and_stream("list_enabled_features", (user,)) diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 8e57c8e5a0..da31eb44dc 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -16,15 +16,133 @@ from typing import Optional, Tuple, Union, cast from canonicaljson import encode_canonical_json +from typing_extensions import TYPE_CHECKING from synapse.api.errors import Codes, StoreError, SynapseError from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import LoggingTransaction -from synapse.types import JsonDict +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) +from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict, UserID from synapse.util.caches.descriptors import cached +if TYPE_CHECKING: + from synapse.server import HomeServer + class FilteringWorkerStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + self.server_name: str = hs.hostname + self.database_engine = database.engine + self.db_pool.updates.register_background_index_update( + "full_users_filters_unique_idx", + index_name="full_users_unique_idx", + table="user_filters", + columns=["full_user_id, filter_id"], + unique=True, + ) + + self.db_pool.updates.register_background_update_handler( + "populate_full_user_id_user_filters", + self.populate_full_user_id_user_filters, + ) + + async def populate_full_user_id_user_filters( + self, progress: JsonDict, 
batch_size: int + ) -> int: + """ + Background update to populate the column `full_user_id` of the table + user_filters from entries in the column `user_local_part` of the same table + """ + + lower_bound_id = progress.get("lower_bound_id", "") + + def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + sql = """ + SELECT user_id FROM user_filters + WHERE user_id > ? + ORDER BY user_id + LIMIT 1 OFFSET 50 + """ + txn.execute(sql, (lower_bound_id,)) + res = txn.fetchone() + if res: + upper_bound_id = res[0] + return upper_bound_id + else: + return None + + def _process_batch( + txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str + ) -> None: + sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL + """ + txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id)) + + def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None: + sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND full_user_id IS NULL + """ + txn.execute( + sql, + ( + f":{self.server_name}", + lower_bound_id, + ), + ) + + if isinstance(self.database_engine, PostgresEngine): + sql = """ + ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null + """ + txn.execute(sql) + + upper_bound_id = await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", _get_last_id + ) + + if upper_bound_id is None: + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", _final_batch, lower_bound_id + ) + + await self.db_pool.updates._end_background_update( + "populate_full_user_id_user_filters" + ) + return 1 + + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", + _process_batch, + lower_bound_id, + upper_bound_id, + ) + + progress["lower_bound_id"] = upper_bound_id + + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", + self.db_pool.updates._background_update_progress_txn, + "populate_full_user_id_user_filters", + progress, + ) + + return 50 + @cached(num_args=2) async def get_user_filter( self, user_localpart: str, filter_id: Union[int, str] @@ -46,7 +164,7 @@ class FilteringWorkerStore(SQLBaseStore): return db_to_json(def_json) - async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int: + async def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> int: def_json = encode_canonical_json(user_filter) # Need an atomic transaction to SELECT the maximal ID so far then @@ -56,13 +174,13 @@ class FilteringWorkerStore(SQLBaseStore): "SELECT filter_id FROM user_filters " "WHERE user_id = ? AND filter_json = ?" ) - txn.execute(sql, (user_localpart, bytearray(def_json))) + txn.execute(sql, (user_id.localpart, bytearray(def_json))) filter_id_response = txn.fetchone() if filter_id_response is not None: return filter_id_response[0] sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?" 
- txn.execute(sql, (user_localpart,)) + txn.execute(sql, (user_id.localpart,)) max_id = cast(Tuple[Optional[int]], txn.fetchone())[0] if max_id is None: filter_id = 0 @@ -70,10 +188,18 @@ class FilteringWorkerStore(SQLBaseStore): filter_id = max_id + 1 sql = ( - "INSERT INTO user_filters (user_id, filter_id, filter_json)" - "VALUES(?, ?, ?)" + "INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)" + "VALUES(?, ?, ?, ?)" + ) + txn.execute( + sql, + ( + user_id.to_string(), + user_id.localpart, + filter_id, + bytearray(def_json), + ), ) - txn.execute(sql, (user_localpart, filter_id, bytearray(def_json))) return filter_id diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 0a19f607bd..1666e3c43b 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -14,10 +14,12 @@ # limitations under the License. import itertools +import json import logging -from typing import Any, Dict, Iterable, List, Optional, Tuple +from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple from signedjson.key import decode_verify_key_bytes +from unpaddedbase64 import decode_base64 from synapse.storage._base import SQLBaseStore from synapse.storage.database import LoggingTransaction @@ -36,15 +38,16 @@ class KeyStore(SQLBaseStore): """Persistence for signature verification keys""" @cached() - def _get_server_verify_key( + def _get_server_signature_key( self, server_name_and_key_id: Tuple[str, str] ) -> FetchKeyResult: raise NotImplementedError() @cachedList( - cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids" + cached_method_name="_get_server_signature_key", + list_name="server_name_and_key_ids", ) - async def get_server_verify_keys( + async def get_server_signature_keys( self, server_name_and_key_ids: Iterable[Tuple[str, str]] ) -> Dict[Tuple[str, str], FetchKeyResult]: """ @@ -62,10 +65,12 @@ class KeyStore(SQLBaseStore): """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) - sql = ( - "SELECT server_name, key_id, verify_key, ts_valid_until_ms " - "FROM server_signature_keys WHERE 1=0" - ) + " OR (server_name=? AND key_id=?)" * len(batch) + sql = """ + SELECT server_name, key_id, verify_key, ts_valid_until_ms + FROM server_signature_keys WHERE 1=0 + """ + " OR (server_name=? AND key_id=?)" * len( + batch + ) txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) @@ -89,13 +94,13 @@ class KeyStore(SQLBaseStore): _get_keys(txn, batch) return keys - return await self.db_pool.runInteraction("get_server_verify_keys", _txn) + return await self.db_pool.runInteraction("get_server_signature_keys", _txn) - async def store_server_verify_keys( + async def store_server_signature_keys( self, from_server: str, ts_added_ms: int, - verify_keys: Iterable[Tuple[str, str, FetchKeyResult]], + verify_keys: Mapping[Tuple[str, str], FetchKeyResult], ) -> None: """Stores NACL verification keys for remote servers. Args: @@ -108,7 +113,7 @@ class KeyStore(SQLBaseStore): key_values = [] value_values = [] invalidations = [] - for server_name, key_id, fetch_result in verify_keys: + for (server_name, key_id), fetch_result in verify_keys.items(): key_values.append((server_name, key_id)) value_values.append( ( @@ -119,7 +124,7 @@ class KeyStore(SQLBaseStore): ) ) # invalidate takes a tuple corresponding to the params of - # _get_server_verify_key. 
_get_server_verify_key only takes one + # _get_server_signature_key. _get_server_signature_key only takes one # param, which is itself the 2-tuple (server_name, key_id). invalidations.append((server_name, key_id)) @@ -134,10 +139,10 @@ class KeyStore(SQLBaseStore): "verify_key", ), value_values=value_values, - desc="store_server_verify_keys", + desc="store_server_signature_keys", ) - invalidate = self._get_server_verify_key.invalidate + invalidate = self._get_server_signature_key.invalidate for i in invalidations: invalidate((i,)) @@ -180,7 +185,75 @@ class KeyStore(SQLBaseStore): desc="store_server_keys_json", ) + # invalidate takes a tuple corresponding to the params of + # _get_server_keys_json. _get_server_keys_json only takes one + # param, which is itself the 2-tuple (server_name, key_id). + self._get_server_keys_json.invalidate((((server_name, key_id),))) + + @cached() + def _get_server_keys_json( + self, server_name_and_key_id: Tuple[str, str] + ) -> FetchKeyResult: + raise NotImplementedError() + + @cachedList( + cached_method_name="_get_server_keys_json", list_name="server_name_and_key_ids" + ) async def get_server_keys_json( + self, server_name_and_key_ids: Iterable[Tuple[str, str]] + ) -> Dict[Tuple[str, str], FetchKeyResult]: + """ + Args: + server_name_and_key_ids: + iterable of (server_name, key-id) tuples to fetch keys for + + Returns: + A map from (server_name, key_id) -> FetchKeyResult, or None if the + key is unknown + """ + keys = {} + + def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: + """Processes a batch of keys to fetch, and adds the result to `keys`.""" + + # batch_iter always returns tuples so it's safe to do len(batch) + sql = """ + SELECT server_name, key_id, key_json, ts_valid_until_ms + FROM server_keys_json WHERE 1=0 + """ + " OR (server_name=? AND key_id=?)" * len( + batch + ) + + txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) + + for server_name, key_id, key_json_bytes, ts_valid_until_ms in txn: + if ts_valid_until_ms is None: + # Old keys may be stored with a ts_valid_until_ms of null, + # in which case we treat this as if it was set to `0`, i.e. + # it won't match key requests that define a minimum + # `ts_valid_until_ms`. + ts_valid_until_ms = 0 + + # The entire signed JSON response is stored in server_keys_json, + # fetch out the bits needed. + key_json = json.loads(bytes(key_json_bytes)) + key_base64 = key_json["verify_keys"][key_id]["key"] + + keys[(server_name, key_id)] = FetchKeyResult( + verify_key=decode_verify_key_bytes( + key_id, decode_base64(key_base64) + ), + valid_until_ts=ts_valid_until_ms, + ) + + def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]: + for batch in batch_iter(server_name_and_key_ids, 50): + _get_keys(txn, batch) + return keys + + return await self.db_pool.runInteraction("get_server_keys_json", _txn) + + async def get_server_keys_json_for_remote( self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]] ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]: """Retrieve the key json for a list of server_keys and key ids. @@ -188,8 +261,10 @@ class KeyStore(SQLBaseStore): that server, key_id, and source triplet entry will be an empty list. The JSON is returned as a byte array so that it can be efficiently used in an HTTP response. + Args: server_keys: List of (server_name, key_id, source) triplets. 
+ Returns: A mapping from (server_name, key_id, source) triplets to a list of dicts """ diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 14294a0bb8..595e22982e 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -248,89 +248,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): (count,) = cast(Tuple[int], txn.fetchone()) return count - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. - """ - - def _count_r30_users(txn: LoggingTransaction) -> Dict[str, int]: - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COUNT(*) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COUNT(*) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = cast(Tuple[int], txn.fetchone()) - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - async def count_r30v2_users(self) -> Dict[str, int]: """ Counts the number of 30 day retained users, defined as users that: diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index a1747f04ce..65c92bef51 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -11,14 +11,132 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional +from typing import TYPE_CHECKING, Optional from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.databases.main.roommember import ProfileInfo +from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict, UserID + +if TYPE_CHECKING: + from synapse.server import HomeServer class ProfileWorkerStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + self.server_name: str = hs.hostname + self.database_engine = database.engine + self.db_pool.updates.register_background_index_update( + "profiles_full_user_id_key_idx", + index_name="profiles_full_user_id_key", + table="profiles", + columns=["full_user_id"], + unique=True, + ) + + self.db_pool.updates.register_background_update_handler( + "populate_full_user_id_profiles", self.populate_full_user_id_profiles + ) + + async def populate_full_user_id_profiles( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the column `full_user_id` of the table + profiles from entries in the column `user_local_part` of the same table + """ + + lower_bound_id = progress.get("lower_bound_id", "") + + def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + sql = """ + SELECT user_id FROM profiles + WHERE user_id > ? + ORDER BY user_id + LIMIT 1 OFFSET 50 + """ + txn.execute(sql, (lower_bound_id,)) + res = txn.fetchone() + if res: + upper_bound_id = res[0] + return upper_bound_id + else: + return None + + def _process_batch( + txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str + ) -> None: + sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL + """ + txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id)) + + def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None: + sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE ? 
< user_id AND full_user_id IS NULL + """ + txn.execute( + sql, + ( + f":{self.server_name}", + lower_bound_id, + ), + ) + + if isinstance(self.database_engine, PostgresEngine): + sql = """ + ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null + """ + txn.execute(sql) + + upper_bound_id = await self.db_pool.runInteraction( + "populate_full_user_id_profiles", _get_last_id + ) + + if upper_bound_id is None: + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", _final_batch, lower_bound_id + ) + + await self.db_pool.updates._end_background_update( + "populate_full_user_id_profiles" + ) + return 1 + + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", + _process_batch, + lower_bound_id, + upper_bound_id, + ) + + progress["lower_bound_id"] = upper_bound_id + + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", + self.db_pool.updates._background_update_progress_txn, + "populate_full_user_id_profiles", + progress, + ) + + return 50 + async def get_profileinfo(self, user_localpart: str) -> ProfileInfo: try: profile = await self.db_pool.simple_select_one( @@ -54,28 +172,52 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_avatar_url", ) - async def create_profile(self, user_localpart: str) -> None: + async def create_profile(self, user_id: UserID) -> None: + user_localpart = user_id.localpart await self.db_pool.simple_insert( - table="profiles", values={"user_id": user_localpart}, desc="create_profile" + table="profiles", + values={"user_id": user_localpart, "full_user_id": user_id.to_string()}, + desc="create_profile", ) async def set_profile_displayname( - self, user_localpart: str, new_displayname: Optional[str] + self, user_id: UserID, new_displayname: Optional[str] ) -> None: + """ + Set the display name of a user. + + Args: + user_id: The user's ID. + new_displayname: The new display name. If this is None, the user's display + name is removed. + """ + user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - values={"displayname": new_displayname}, + values={ + "displayname": new_displayname, + "full_user_id": user_id.to_string(), + }, desc="set_profile_displayname", ) async def set_profile_avatar_url( - self, user_localpart: str, new_avatar_url: Optional[str] + self, user_id: UserID, new_avatar_url: Optional[str] ) -> None: + """ + Set the avatar of a user. + + Args: + user_id: The user's ID. + new_avatar_url: The new avatar URL. If this is None, the user's avatar is + removed. + """ + user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - values={"avatar_url": new_avatar_url}, + values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()}, desc="set_profile_avatar_url", ) diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 7a7c0d9c75..efbd3e75d9 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -428,14 +428,16 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): "partial_state_events", "partial_state_rooms_servers", "partial_state_rooms", + # Note: the _membership(s) tables have foreign keys to the `events` table + # so must be deleted first. 
+ "local_current_membership", + "room_memberships", "events", "federation_inbound_events_staging", - "local_current_membership", "receipts_graph", "receipts_linearized", "room_aliases", "room_depth", - "room_memberships", "room_stats_state", "room_stats_current", "room_stats_earliest_token", diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index aeb6034f46..87e28e22d3 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -562,7 +562,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): ) self.db_pool.updates._background_update_progress_txn( - txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]} + txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["pusher_id"]} ) return len(rows) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 074942b167..5ee5c7ad9f 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -85,13 +85,10 @@ class ReceiptsWorkerStore(SQLBaseStore): else: self._can_write_to_receipts = True + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). self._receipts_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 717237e024..676d03bb7e 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -2414,8 +2414,8 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): # *obviously* the 'profiles' table uses localpart for user_id # while everything else uses the full mxid. txn.execute( - "INSERT INTO profiles(user_id, displayname) VALUES (?,?)", - (user_id_obj.localpart, create_profile_with_displayname), + "INSERT INTO profiles(full_user_id, user_id, displayname) VALUES (?,?,?)", + (user_id, user_id_obj.localpart, create_profile_with_displayname), ) if self.hs.config.stats.stats_enabled: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 3955a8a9a5..4a6c6c724d 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -172,6 +172,7 @@ class RelationsWorkerStore(SQLBaseStore): direction: Direction = Direction.BACKWARDS, from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, + recurse: bool = False, ) -> Tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: """Get a list of relations for an event, ordered by topological ordering. @@ -186,6 +187,7 @@ class RelationsWorkerStore(SQLBaseStore): oldest first (forwards). from_token: Fetch rows from the given token, or from the start if None. to_token: Fetch rows up to the given token, or up to the end if None. + recurse: Whether to recursively find relations. Returns: A tuple of: @@ -200,8 +202,8 @@ class RelationsWorkerStore(SQLBaseStore): # Ensure bad limits aren't being passed in. 
assert limit >= 0 - where_clause = ["relates_to_id = ?", "room_id = ?"] - where_args: List[Union[str, int]] = [event.event_id, room_id] + where_clause = ["room_id = ?"] + where_args: List[Union[str, int]] = [room_id] is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: @@ -229,23 +231,52 @@ if pagination_clause: where_clause.append(pagination_clause) - sql = """ - SELECT event_id, relation_type, sender, topological_ordering, stream_ordering - FROM event_relations - INNER JOIN events USING (event_id) - WHERE %s - ORDER BY topological_ordering %s, stream_ordering %s - LIMIT ? - """ % ( - " AND ".join(where_clause), - order, - order, - ) + # If a recursive query is requested then the filters are applied after + # recursively following relationships from the requested event to children + # up to 3 relations deep. + # + # If no recursion is needed then the event_relations table is queried + # for direct children of the requested event. + if recurse: + sql = """ + WITH RECURSIVE related_events AS ( + SELECT event_id, relation_type, relates_to_id, 0 AS depth + FROM event_relations + WHERE relates_to_id = ? + UNION SELECT e.event_id, e.relation_type, e.relates_to_id, depth + 1 + FROM event_relations e + INNER JOIN related_events r ON r.event_id = e.relates_to_id + WHERE depth <= 3 + ) + SELECT event_id, relation_type, sender, topological_ordering, stream_ordering + FROM related_events + INNER JOIN events USING (event_id) + WHERE %s + ORDER BY topological_ordering %s, stream_ordering %s + LIMIT ?; + """ % ( + " AND ".join(where_clause), + order, + order, + ) + else: + sql = """ + SELECT event_id, relation_type, sender, topological_ordering, stream_ordering + FROM event_relations + INNER JOIN events USING (event_id) + WHERE relates_to_id = ? AND %s + ORDER BY topological_ordering %s, stream_ordering %s + LIMIT ? + """ % ( + " AND ".join(where_clause), + order, + order, + ) def _get_recent_references_for_event_txn( txn: LoggingTransaction, ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: - txn.execute(sql, where_args + [limit + 1]) + txn.execute(sql, [event.event_id] + where_args + [limit + 1]) events = [] topo_orderings: List[int] = [] @@ -965,7 +996,7 @@ class RelationsWorkerStore(SQLBaseStore): # relation. sql = """ WITH RECURSIVE related_events AS ( - SELECT event_id, relates_to_id, relation_type, 0 depth + SELECT event_id, relates_to_id, relation_type, 0 AS depth FROM event_relations WHERE event_id = ? UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1 @@ -1025,7 +1056,7 @@ class RelationsWorkerStore(SQLBaseStore): sql = """ SELECT relates_to_id FROM event_relations WHERE relates_to_id = COALESCE(( WITH RECURSIVE related_events AS ( - SELECT event_id, relates_to_id, relation_type, 0 depth + SELECT event_id, relates_to_id, relation_type, 0 AS depth FROM event_relations WHERE event_id = ?
UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1 diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3825bd6079..ca8be8c80d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -996,7 +996,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): If it is `None` media will be removed from quarantine """ logger.info("Quarantining media: %s/%s", server_name, media_id) - is_local = server_name == self.config.server.server_name + is_local = self.hs.is_mine_server_name(server_name) def _quarantine_media_by_id_txn(txn: LoggingTransaction) -> int: local_mxcs = [media_id] if is_local else [] @@ -1998,6 +1998,9 @@ class RoomBackgroundUpdateStore(SQLBaseStore): for room_id, event_json in room_id_to_create_event_results: event_dict = db_to_json(event_json) + # The creator property might not exist in newer room versions, but + # for those versions the creator column should be properly populated + # during room creation. creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR) self.db_pool.simple_update_txn( @@ -2132,12 +2135,16 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): # invalid, and it would fail auth checks anyway. raise StoreError(400, "No create event in state") - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + # Before MSC2175, the room creator was a separate field. + if not room_version.msc2175_implicit_room_creator: + room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) - if not isinstance(room_creator, str): - # If the create event does not have a creator then the room is - # invalid, and it would fail auth checks anyway. - raise StoreError(400, "No creator defined on the create event") + if not isinstance(room_creator, str): + # If the create event does not have a creator then the room is + # invalid, and it would fail auth checks anyway. + raise StoreError(400, "No creator defined on the create event") + else: + room_creator = create_event.sender await self.db_pool.simple_upsert( desc="upsert_room_on_join", diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index daad58291a..ae9c201b87 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -82,7 +82,7 @@ class EventIdMembership: membership: str -class RoomMemberWorkerStore(EventsWorkerStore): +class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def __init__( self, database: DatabasePool, @@ -1099,7 +1099,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # `get_joined_hosts` is called with the "current" state group for the # room, and so consecutive calls will be for consecutive state groups # which point to the previous state group. - cache = await self._get_joined_hosts_cache(room_id) # type: ignore[misc] + cache = await self._get_joined_hosts_cache(room_id) # If the state group in the cache matches, we already have the data we need.
if state_entry.state_group == cache.state_group: @@ -1372,6 +1372,50 @@ class RoomMemberWorkerStore(EventsWorkerStore): _is_local_host_in_room_ignoring_users_txn, ) + async def forget(self, user_id: str, room_id: str) -> None: + """Indicate that user_id wishes to discard history for room_id.""" + + def f(txn: LoggingTransaction) -> None: + self.db_pool.simple_update_txn( + txn, + table="room_memberships", + keyvalues={"user_id": user_id, "room_id": room_id}, + updatevalues={"forgotten": 1}, + ) + + self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) + self._invalidate_cache_and_stream( + txn, self.get_forgotten_rooms_for_user, (user_id,) + ) + + await self.db_pool.runInteraction("forget_membership", f) + + async def get_room_forgetter_stream_pos(self) -> int: + """Get the stream position of the background process to forget rooms when left + by users. + """ + return await self.db_pool.simple_select_one_onecol( + table="room_forgetter_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="room_forgetter_stream_pos", + ) + + async def update_room_forgetter_stream_pos(self, stream_id: int) -> None: + """Update the stream position of the background process to forget rooms when + left by users. + + Must only be used by the worker running the background process. + """ + assert self.hs.config.worker.run_background_tasks + + await self.db_pool.simple_update_one( + table="room_forgetter_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="room_forgetter_stream_pos", + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( @@ -1553,29 +1597,6 @@ class RoomMemberStore( ): super().__init__(database, db_conn, hs) - async def forget(self, user_id: str, room_id: str) -> None: - """Indicate that user_id wishes to discard history for room_id.""" - - def f(txn: LoggingTransaction) -> None: - sql = ( - "UPDATE" - " room_memberships" - " SET" - " forgotten = 1" - " WHERE" - " user_id = ?" - " AND" - " room_id = ?" - ) - txn.execute(sql, (user_id, room_id)) - - self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) - self._invalidate_cache_and_stream( - txn, self.get_forgotten_rooms_for_user, (user_id,) - ) - - await self.db_pool.runInteraction("forget_membership", f) - def extract_heroes_from_room_summary( details: Mapping[str, MemberSummary], me: str diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 9fced4b997..b7d58978de 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -27,6 +27,8 @@ from typing import ( cast, ) +import attr + try: # Figure out if ICU support is available for searching users. import icu @@ -66,6 +68,19 @@ logger = logging.getLogger(__name__) TEMP_TABLE = "_temp_populate_user_directory" +@attr.s(auto_attribs=True, frozen=True) +class _UserDirProfile: + """Helper type for the user directory code for an entry to be inserted into + the directory. + """ + + user_id: str + + # If the display name or avatar URL are unexpected types, replace with None + display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + + class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # How many records do we calculate before sending it to # add_users_who_share_private_rooms? 
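An aside on the `_UserDirProfile` type introduced above: the attrs converters are what guarantee the "replace with None" behaviour for unexpected display name and avatar values. A minimal, self-contained sketch of that pattern follows; note that `non_null_str_or_none` is re-implemented here for illustration and may differ in detail from Synapse's actual helper.

```python
from typing import Any, Optional

import attr


def non_null_str_or_none(val: Any) -> Optional[str]:
    # Illustrative re-implementation: keep plain strings (without NUL
    # codepoints), replace anything else with None so bad values never
    # reach the database.
    if isinstance(val, str) and "\u0000" not in val:
        return val
    return None


@attr.s(auto_attribs=True, frozen=True)
class ProfileEntry:
    user_id: str
    display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none)
    avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none)


# The converter runs at construction time, so a non-string display name is
# silently normalised to None:
entry = ProfileEntry(user_id="@alice:example.com", display_name=12345)
assert entry.display_name is None
```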
@@ -102,44 +117,34 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) -> int: # Get all the rooms that we want to process. def _make_staging_area(txn: LoggingTransaction) -> None: - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" - ) - txn.execute(sql) - - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_position(position TEXT NOT NULL)" - ) - txn.execute(sql) - - # Get rooms we want to process from the database - sql = """ - SELECT room_id, count(*) FROM current_state_events + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_rooms AS + SELECT room_id, count(*) AS events + FROM current_state_events GROUP BY room_id """ txn.execute(sql) - rooms = list(txn.fetchall()) - self.db_pool.simple_insert_many_txn( - txn, TEMP_TABLE + "_rooms", keys=("room_id", "events"), values=rooms + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_rm ON {TEMP_TABLE}_rooms (room_id)" ) - del rooms - - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_users(user_id TEXT NOT NULL)" + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_evs ON {TEMP_TABLE}_rooms (events)" ) - txn.execute(sql) - txn.execute("SELECT name FROM users") - users = list(txn.fetchall()) + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_position ( + position TEXT NOT NULL + ) + """ + txn.execute(sql) - self.db_pool.simple_insert_many_txn( - txn, TEMP_TABLE + "_users", keys=("user_id",), values=users + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_users AS + SELECT name AS user_id FROM users + """ + txn.execute(sql) + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_users_idx ON {TEMP_TABLE}_users (user_id)" ) new_pos = await self.get_max_stream_id_in_current_state_deltas() @@ -222,12 +227,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): if not rooms_to_work_on: return None - # Get how many are left to process, so we can give status on how - # far we are in processing - txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") - result = txn.fetchone() - assert result is not None - progress["remaining"] = result[0] + if "remaining" not in progress: + # Get how many are left to process, so we can give status on how + # far we are in processing + txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") + result = txn.fetchone() + assert result is not None + progress["remaining"] = result[0] return rooms_to_work_on @@ -332,7 +338,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. 
- return processed_event_count + break + + await self.db_pool.runInteraction( + "populate_user_directory", + self.db_pool.updates._background_update_progress_txn, + "populate_user_directory_process_rooms", + progress, + ) return processed_event_count @@ -356,13 +369,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): users_to_work_on = [x[0] for x in user_result] - # Get how many are left to process, so we can give status on how - # far we are in processing - sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users" - txn.execute(sql) - count_result = txn.fetchone() - assert count_result is not None - progress["remaining"] = count_result[0] + if "remaining" not in progress: + # Get how many are left to process, so we can give status on how + # far we are in processing + sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users" + txn.execute(sql) + count_result = txn.fetchone() + assert count_result is not None + progress["remaining"] = count_result[0] return users_to_work_on @@ -382,25 +396,65 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): % (len(users_to_work_on), progress["remaining"]) ) - for user_id in users_to_work_on: - if await self.should_include_local_user_in_dir(user_id): - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) # type: ignore[attr-defined] - await self.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url - ) - - # We've finished processing a user. Delete it from the table. - await self.db_pool.simple_delete_one( - TEMP_TABLE + "_users", {"user_id": user_id} - ) - # Update the remaining counter. - progress["remaining"] -= 1 - await self.db_pool.runInteraction( - "populate_user_directory", - self.db_pool.updates._background_update_progress_txn, - "populate_user_directory_process_users", - progress, + # First filter down to users we want to insert into the user directory. + users_to_insert = [ + user_id + for user_id in users_to_work_on + if await self.should_include_local_user_in_dir(user_id) + ] + + # Next fetch their profiles. Note that the `user_id` here is the + # *localpart*, and that not all users have profiles. + profile_rows = await self.db_pool.simple_select_many_batch( + table="profiles", + column="user_id", + iterable=[get_localpart_from_id(u) for u in users_to_insert], + retcols=( + "user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, + desc="populate_user_directory_process_users_get_profiles", + ) + profiles = { + f"@{row['user_id']}:{self.server_name}": _UserDirProfile( + f"@{row['user_id']}:{self.server_name}", + row["displayname"], + row["avatar_url"], ) + for row in profile_rows + } + + profiles_to_insert = [ + profiles.get(user_id) or _UserDirProfile(user_id) + for user_id in users_to_insert + ] + + # Actually insert the users with their profiles into the directory. + await self.db_pool.runInteraction( + "populate_user_directory_process_users_insertion", + self._update_profiles_in_user_dir_txn, + profiles_to_insert, + ) + + # We've finished processing the users. Delete them from the table. + await self.db_pool.simple_delete_many( + table=TEMP_TABLE + "_users", + column="user_id", + iterable=users_to_work_on, + keyvalues={}, + desc="populate_user_directory_process_users_delete", + ) + + # Update the remaining counter.
+ progress["remaining"] -= len(users_to_work_on) + await self.db_pool.runInteraction( + "populate_user_directory", + self.db_pool.updates._background_update_progress_txn, + "populate_user_directory_process_users", + progress, + ) return len(users_to_work_on) @@ -585,72 +639,102 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): Update or add a user's profile in the user directory. If the user is remote, the profile will be marked as not stale. """ - # If the display name or avatar URL are unexpected types, replace with None. - display_name = non_null_str_or_none(display_name) - avatar_url = non_null_str_or_none(avatar_url) + await self.db_pool.runInteraction( + "update_profiles_in_user_dir", + self._update_profiles_in_user_dir_txn, + [_UserDirProfile(user_id, display_name, avatar_url)], + ) + + def _update_profiles_in_user_dir_txn( + self, + txn: LoggingTransaction, + profiles: Sequence[_UserDirProfile], + ) -> None: + self.db_pool.simple_upsert_many_txn( + txn, + table="user_directory", + key_names=("user_id",), + key_values=[(p.user_id,) for p in profiles], + value_names=("display_name", "avatar_url"), + value_values=[ + ( + p.display_name, + p.avatar_url, + ) + for p in profiles + ], + ) - def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: - self.db_pool.simple_upsert_txn( + # Remote users: Make sure the profile is not marked as stale anymore. + remote_users = [ + p.user_id for p in profiles if not self.hs.is_mine_id(p.user_id) + ] + if remote_users: + self.db_pool.simple_delete_many_txn( txn, - table="user_directory", - keyvalues={"user_id": user_id}, - values={"display_name": display_name, "avatar_url": avatar_url}, + table="user_directory_stale_remote_users", + column="user_id", + values=remote_users, + keyvalues={}, ) - if not self.hs.is_mine_id(user_id): - # Remote users: Make sure the profile is not marked as stale anymore. - self.db_pool.simple_delete_txn( - txn, - table="user_directory_stale_remote_users", - keyvalues={"user_id": user_id}, + if isinstance(self.database_engine, PostgresEngine): + # We weight the localpart most highly, then display name and finally + # server name + template = """ + ( + %s, + setweight(to_tsvector('simple', %s), 'A') + || setweight(to_tsvector('simple', %s), 'D') + || setweight(to_tsvector('simple', COALESCE(%s, '')), 'B') ) + """ - # The display name that goes into the database index. - index_display_name = display_name - if index_display_name is not None: - index_display_name = _filter_text_for_index(index_display_name) - - if isinstance(self.database_engine, PostgresEngine): - # We weight the localpart most highly, then display name and finally - # server name - sql = """ - INSERT INTO user_directory_search(user_id, vector) - VALUES (?, - setweight(to_tsvector('simple', ?), 'A') - || setweight(to_tsvector('simple', ?), 'D') - || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') - ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector - """ - txn.execute( - sql, + sql = """ + INSERT INTO user_directory_search(user_id, vector) + VALUES ? 
ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector + """ + txn.execute_values( + sql, + [ ( - user_id, - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - index_display_name, - ), - ) - elif isinstance(self.database_engine, Sqlite3Engine): - value = ( - "%s %s" % (user_id, index_display_name) - if index_display_name - else user_id - ) - self.db_pool.simple_upsert_txn( - txn, - table="user_directory_search", - keyvalues={"user_id": user_id}, - values={"value": value}, - ) - else: - # This should be unreachable. - raise Exception("Unrecognized database engine") + p.user_id, + get_localpart_from_id(p.user_id), + get_domain_from_id(p.user_id), + _filter_text_for_index(p.display_name) + if p.display_name + else None, + ) + for p in profiles + ], + template=template, + fetch=False, + ) + elif isinstance(self.database_engine, Sqlite3Engine): + values = [] + for p in profiles: + if p.display_name is not None: + index_display_name = _filter_text_for_index(p.display_name) + value = f"{p.user_id} {index_display_name}" + else: + value = p.user_id - txn.call_after(self.get_user_in_directory.invalidate, (user_id,)) + values.append((value,)) - await self.db_pool.runInteraction( - "update_profile_in_user_dir", _update_profile_in_user_dir_txn - ) + self.db_pool.simple_upsert_many_txn( + txn, + table="user_directory_search", + key_names=("user_id",), + key_values=[(p.user_id,) for p in profiles], + value_names=("value",), + value_values=values, + ) + else: + # This should be unreachable. + raise Exception("Unrecognized database engine") + + for p in profiles: + txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,)) async def add_users_who_share_private_room( self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]] diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 097dea5182..86eb1a8a08 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -40,6 +41,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): updates. 
""" + @trace + @tag_args def _count_state_group_hops_txn( self, txn: LoggingTransaction, state_group: int ) -> int: @@ -83,6 +86,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): return count + @trace + @tag_args def _get_state_groups_from_groups_txn( self, txn: LoggingTransaction, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 29ff64e876..6984d11352 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -20,6 +20,7 @@ import attr from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -159,6 +160,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): "get_state_group_delta", _get_state_group_delta_txn ) + @trace + @tag_args @cancellable async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter @@ -187,6 +190,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return results + @trace + @tag_args def _get_state_for_group_using_cache( self, cache: DictionaryCache[int, StateKey, str], @@ -239,6 +244,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state_filter.filter_state(state_dict_ids), not missing_types + @trace + @tag_args @cancellable async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None @@ -305,6 +312,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state + @trace + @tag_args def _get_state_for_groups_using_cache( self, groups: Iterable[int], @@ -403,6 +412,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): fetched_keys=non_member_types, ) + @trace + @tag_args async def store_state_deltas_for_batched( self, events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], @@ -520,6 +531,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): prev_group, ) + @trace + @tag_args async def store_state_group( self, event_id: str, @@ -772,6 +785,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ((sg,) for sg in state_groups_to_delete), ) + @trace + @tag_args async def get_previous_state_groups( self, state_groups: Iterable[int] ) -> Dict[int, int]: diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 2a1c6fa31b..38b7abd801 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -22,7 +22,7 @@ import attr from typing_extensions import Counter as CounterType from synapse.config.homeserver import HomeServerConfig -from synapse.storage.database import LoggingDatabaseConnection +from synapse.storage.database import LoggingDatabaseConnection, LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION from synapse.storage.types import Cursor @@ -168,7 +168,9 @@ def prepare_database( def _setup_new_database( - cur: Cursor, database_engine: BaseDatabaseEngine, databases: Collection[str] + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + databases: Collection[str], ) -> None: """Sets up the physical database by finding a base set of "full schemas" and 
then applying any necessary deltas, including schemas from the given data @@ -289,7 +291,7 @@ def _setup_new_database( def _upgrade_existing_database( - cur: Cursor, + cur: LoggingTransaction, current_schema_state: _SchemaState, database_engine: BaseDatabaseEngine, config: Optional[HomeServerConfig], diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index d3103a6c7a..df2cc31ca6 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 74 # remember to update the list below when updating +SCHEMA_VERSION = 77 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -91,13 +91,28 @@ Changes in SCHEMA_VERSION = 74: - A query on `event_stream_ordering` column has now been disambiguated (i.e. the codebase can handle the `current_state_events`, `local_current_memberships` and `room_memberships` tables having an `event_stream_ordering` column). + +Changes in SCHEMA_VERSION = 75: + - The `event_stream_ordering` column in membership tables (`current_state_events`, + `local_current_membership` & `room_memberships`) is now being populated for new + rows. When the background job to populate historical rows lands this will + become the compat schema version. + +Changes in SCHEMA_VERSION = 76: + - Adds a full_user_id column to tables profiles and user_filters. + +Changes in SCHEMA_VERSION = 77: + - (Postgres) Add NOT VALID CHECK (full_user_id IS NOT NULL) to tables profiles and user_filters. """ SCHEMA_COMPAT_VERSION = ( - # The threads_id column must exist for event_push_actions, event_push_summary, - # receipts_linearized, and receipts_graph. - 73 + # Queries against `event_stream_ordering` columns in membership tables must + # be disambiguated. + # + # The column `full_user_id` of tables profiles and user_filters can no + # longer be null for newly inserted rows. + 76 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/20/pushers.py b/synapse/storage/schema/main/delta/20/pushers.py index 45b846e6a7..08ae0efc21 100644 --- a/synapse/storage/schema/main/delta/20/pushers.py +++ b/synapse/storage/schema/main/delta/20/pushers.py @@ -24,10 +24,13 @@ UTF-8 bytes, so we have to do it in Python.
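The SCHEMA_VERSION / SCHEMA_COMPAT_VERSION bump above is what bounds rollbacks. A hypothetical sketch of the kind of gate this enables follows; the function name and the exact check are illustrative assumptions, not Synapse's actual prepare-database logic.

```python
# Hypothetical sketch: the running code knows its own SCHEMA_VERSION, while
# the database records the compat version written by the newest code that
# has touched it.
SCHEMA_VERSION = 77


def assert_database_usable(db_compat_version: int) -> None:
    # If newer code has declared that its schema changes cannot be rolled
    # back past `db_compat_version`, older code must refuse to start.
    if db_compat_version > SCHEMA_VERSION:
        raise RuntimeError(
            f"Database requires schema version >= {db_compat_version}, "
            f"but this codebase only supports up to {SCHEMA_VERSION}"
        )


assert_database_usable(76)  # fine: this codebase (77) is new enough
```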
import logging +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine + logger = logging.getLogger(__name__) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: logger.info("Porting pushers table...") cur.execute( """ @@ -61,8 +64,8 @@ def run_create(cur, database_engine, *args, **kwargs): """ ) count = 0 - for row in cur.fetchall(): - row = list(row) + for tuple_row in cur.fetchall(): + row = list(tuple_row) row[8] = bytes(row[8]).decode("utf-8") row[11] = bytes(row[11]).decode("utf-8") cur.execute( @@ -81,7 +84,3 @@ def run_create(cur, database_engine, *args, **kwargs): cur.execute("DROP TABLE pushers") cur.execute("ALTER TABLE pushers2 RENAME TO pushers") logger.info("Moved %d pushers to new table", count) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py index 21f57825d4..831f8e914d 100644 --- a/synapse/storage/schema/main/delta/25/fts.py +++ b/synapse/storage/schema/main/delta/25/fts.py @@ -14,7 +14,8 @@ import json import logging -from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -41,7 +42,7 @@ SQLITE_TABLE = ( ) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): for statement in get_statements(POSTGRES_TABLE.splitlines()): cur.execute(statement) @@ -72,7 +73,3 @@ def run_create(cur, database_engine, *args, **kwargs): ) cur.execute(sql, ("event_search", progress_json)) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py index 1c6058063f..8962afdeda 100644 --- a/synapse/storage/schema/main/delta/27/ts.py +++ b/synapse/storage/schema/main/delta/27/ts.py @@ -14,6 +14,8 @@ import json import logging +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -25,7 +27,7 @@ ALTER_TABLE = ( ) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: for statement in get_statements(ALTER_TABLE.splitlines()): cur.execute(statement) @@ -51,7 +53,3 @@ def run_create(cur, database_engine, *args, **kwargs): ) cur.execute(sql, ("event_origin_server_ts", progress_json)) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py index 4b4b166e37..b9d8df1231 100644 --- a/synapse/storage/schema/main/delta/30/as_users.py +++ b/synapse/storage/schema/main/delta/30/as_users.py @@ -12,13 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
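The delta-script changes above all converge on one typed shape: a `run_create(cur, database_engine)` returning `None`, with the no-op `run_upgrade` stubs deleted outright. A minimal sketch of that shape follows; `example_table` and its column are placeholders, not real Synapse schema.

```python
import logging

from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine

logger = logging.getLogger(__name__)


def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
    # Engine-specific DDL, following the pattern used throughout these
    # deltas. Postgres supports ADD COLUMN IF NOT EXISTS; SQLite does not.
    if isinstance(database_engine, PostgresEngine):
        cur.execute("ALTER TABLE example_table ADD COLUMN IF NOT EXISTS note TEXT")
    else:
        cur.execute("ALTER TABLE example_table ADD COLUMN note TEXT")
    logger.info("Added example_table.note")
```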
import logging +from typing import Dict, Iterable, List, Tuple, cast from synapse.config.appservice import load_appservices +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine logger = logging.getLogger(__name__) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: # NULL indicates user was not registered by an appservice. try: cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT") @@ -27,9 +31,13 @@ def run_create(cur, database_engine, *args, **kwargs): pass -def run_upgrade(cur, database_engine, config, *args, **kwargs): +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: cur.execute("SELECT name FROM users") - rows = cur.fetchall() + rows = cast(Iterable[Tuple[str]], cur.fetchall()) config_files = [] try: @@ -39,7 +47,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): appservices = load_appservices(config.server.server_name, config_files) - owned = {} + owned: Dict[str, List[str]] = {} for row in rows: user_id = row[0] diff --git a/synapse/storage/schema/main/delta/31/pushers.py b/synapse/storage/schema/main/delta/31/pushers_0.py index 5be81c806a..e772e2dc65 100644 --- a/synapse/storage/schema/main/delta/31/pushers.py +++ b/synapse/storage/schema/main/delta/31/pushers_0.py @@ -20,14 +20,17 @@ import logging +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine + logger = logging.getLogger(__name__) -def token_to_stream_ordering(token): +def token_to_stream_ordering(token: str) -> int: return int(token[1:].split("_")[0]) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: logger.info("Porting pushers table, delta 31...") cur.execute( """ @@ -61,8 +64,8 @@ def run_create(cur, database_engine, *args, **kwargs): """ ) count = 0 - for row in cur.fetchall(): - row = list(row) + for tuple_row in cur.fetchall(): + row = list(tuple_row) row[12] = token_to_stream_ordering(row[12]) cur.execute( """ @@ -80,7 +83,3 @@ def run_create(cur, database_engine, *args, **kwargs): cur.execute("DROP TABLE pushers") cur.execute("ALTER TABLE pushers2 RENAME TO pushers") logger.info("Moved %d pushers to new table", count) - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py index b84c844e3a..e20e92e454 100644 --- a/synapse/storage/schema/main/delta/31/search_update.py +++ b/synapse/storage/schema/main/delta/31/search_update.py @@ -14,7 +14,8 @@ import json import logging -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -26,7 +27,7 @@ ALTER TABLE event_search ADD COLUMN stream_ordering BIGINT; """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if not isinstance(database_engine, PostgresEngine): return @@ -56,7 +57,3 @@ def run_create(cur, database_engine, *args, **kwargs): ) cur.execute(sql, 
("event_search_order", progress_json)) - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py index e928c66a8f..8d806f5b52 100644 --- a/synapse/storage/schema/main/delta/33/event_fields.py +++ b/synapse/storage/schema/main/delta/33/event_fields.py @@ -14,6 +14,8 @@ import json import logging +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -25,7 +27,7 @@ ALTER TABLE events ADD COLUMN contains_url BOOLEAN; """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: for statement in get_statements(ALTER_TABLE.splitlines()): cur.execute(statement) @@ -51,7 +53,3 @@ def run_create(cur, database_engine, *args, **kwargs): ) cur.execute(sql, ("event_fields_sender_url", progress_json)) - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/33/remote_media_ts.py b/synapse/storage/schema/main/delta/33/remote_media_ts.py index 3907189e29..35499e43b5 100644 --- a/synapse/storage/schema/main/delta/33/remote_media_ts.py +++ b/synapse/storage/schema/main/delta/33/remote_media_ts.py @@ -14,14 +14,22 @@ import time +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine + ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT" -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: cur.execute(ALTER_TABLE) -def run_upgrade(cur, database_engine, *args, **kwargs): +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: cur.execute( "UPDATE remote_media_cache SET last_access_ts = ?", (int(time.time() * 1000),), diff --git a/synapse/storage/schema/main/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py index cf09e43e2b..882f9b893b 100644 --- a/synapse/storage/schema/main/delta/34/cache_stream.py +++ b/synapse/storage/schema/main/delta/34/cache_stream.py @@ -14,13 +14,14 @@ import logging -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) -# This stream is used to notify replication slaves that some caches have +# This stream is used to notify workers over replication that some caches have # been invalidated that they cannot infer from the other streams. 
CREATE_TABLE = """ CREATE TABLE cache_invalidation_stream ( @@ -34,13 +35,9 @@ CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream(stream_id """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if not isinstance(database_engine, PostgresEngine): return for statement in get_statements(CREATE_TABLE.splitlines()): cur.execute(statement) - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/34/received_txn_purge.py b/synapse/storage/schema/main/delta/34/received_txn_purge.py index 67d505e68b..dcfe3bc45a 100644 --- a/synapse/storage/schema/main/delta/34/received_txn_purge.py +++ b/synapse/storage/schema/main/delta/34/received_txn_purge.py @@ -14,19 +14,16 @@ import logging -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine logger = logging.getLogger(__name__) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): cur.execute("TRUNCATE received_transactions") else: cur.execute("DELETE FROM received_transactions") cur.execute("CREATE INDEX received_transactions_ts ON received_transactions(ts)") - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/37/remove_auth_idx.py b/synapse/storage/schema/main/delta/37/remove_auth_idx.py index a377884169..d672f9b43c 100644 --- a/synapse/storage/schema/main/delta/37/remove_auth_idx.py +++ b/synapse/storage/schema/main/delta/37/remove_auth_idx.py @@ -14,7 +14,8 @@ import logging -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -68,7 +69,7 @@ CREATE INDEX evauth_edges_id ON event_auth(event_id); """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: for statement in get_statements(DROP_INDICES.splitlines()): cur.execute(statement) @@ -79,7 +80,3 @@ def run_create(cur, database_engine, *args, **kwargs): for statement in get_statements(drop_constraint.splitlines()): cur.execute(statement) - - -def run_upgrade(cur, database_engine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/42/user_dir.py b/synapse/storage/schema/main/delta/42/user_dir.py index 506f326f4d..7e5c307c62 100644 --- a/synapse/storage/schema/main/delta/42/user_dir.py +++ b/synapse/storage/schema/main/delta/42/user_dir.py @@ -14,7 +14,8 @@ import logging -from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) @@ -66,7 +67,7 @@ CREATE VIRTUAL TABLE user_directory_search """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: for statement in get_statements(BOTH_TABLES.splitlines()): cur.execute(statement) @@ -78,7 +79,3 @@ def 
run_create(cur, database_engine, *args, **kwargs): cur.execute(statement) else: raise Exception("Unrecognized database engine") - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py index 49f5f2c003..ad2da4c8af 100644 --- a/synapse/storage/schema/main/delta/48/group_unique_indexes.py +++ b/synapse/storage/schema/main/delta/48/group_unique_indexes.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.engines import PostgresEngine + +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import get_statements FIX_INDEXES = """ @@ -34,7 +36,7 @@ CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); """ -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: rowid = "ctid" if isinstance(database_engine, PostgresEngine) else "rowid" # remove duplicates from group_users & group_invites tables @@ -57,7 +59,3 @@ def run_create(cur, database_engine, *args, **kwargs): for statement in get_statements(FIX_INDEXES.splitlines()): cur.execute(statement) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/50/make_event_content_nullable.py b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py index acd6ad1e1f..3e8a348b8a 100644 --- a/synapse/storage/schema/main/delta/50/make_event_content_nullable.py +++ b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py @@ -53,16 +53,13 @@ SQLite: import logging -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine logger = logging.getLogger(__name__) -def run_create(cur, database_engine, *args, **kwargs): - pass - - -def run_upgrade(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): cur.execute( """ @@ -76,7 +73,9 @@ def run_upgrade(cur, database_engine, *args, **kwargs): cur.execute( "SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'" ) - (oldsql,) = cur.fetchone() + row = cur.fetchone() + assert row is not None + (oldsql,) = row sql = oldsql.replace("content TEXT NOT NULL", "content TEXT") if sql == oldsql: @@ -85,7 +84,9 @@ def run_upgrade(cur, database_engine, *args, **kwargs): logger.info("Replacing definition of 'events' with: %s", sql) cur.execute("PRAGMA schema_version") - (oldver,) = cur.fetchone() + row = cur.fetchone() + assert row is not None + (oldver,) = row cur.execute("PRAGMA writable_schema=ON") cur.execute( "UPDATE sqlite_master SET sql=? 
WHERE tbl_name='events' AND type='table'", diff --git a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py index bb7296852a..2461f87d77 100644 --- a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py +++ b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py @@ -1,7 +1,8 @@ import logging from io import StringIO -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import execute_statements_from_stream logger = logging.getLogger(__name__) @@ -16,11 +17,7 @@ This migration updates the user_filters table as follows: """ -def run_upgrade(cur, database_engine, *args, **kwargs): - pass - - -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): select_clause = """ SELECT DISTINCT ON (user_id, filter_id) user_id, filter_id, filter_json diff --git a/synapse/storage/schema/main/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py index d25093c19f..cc0f2109bb 100644 --- a/synapse/storage/schema/main/delta/57/local_current_membership.py +++ b/synapse/storage/schema/main/delta/57/local_current_membership.py @@ -27,7 +27,16 @@ # equivalent behaviour as if the server had remained in the room). -def run_upgrade(cur, database_engine, config, *args, **kwargs): +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine + + +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: # We need to do the insert in `run_upgrade` section as we don't have access # to `config` in `run_create`. @@ -77,7 +86,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): ) -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: cur.execute( """ CREATE TABLE local_current_membership ( diff --git a/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py index d353f2bcb3..4eaab9e086 100644 --- a/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py +++ b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py @@ -20,18 +20,14 @@ entries, and with a UNIQUE index. 
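A recurring mechanical change in the migration scripts above is worth spelling out: `fetchone()` is typed as returning an optional row, so every call site that knows a row must exist gains an `assert row is not None` before unpacking. A condensed sketch of the pattern, reusing the `sqlite_master` query from the events migration above:

```python
from synapse.storage.database import LoggingTransaction


def get_events_table_sql(cur: LoggingTransaction) -> str:
    cur.execute(
        "SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'"
    )
    row = cur.fetchone()
    # fetchone() returns an Optional row; the assert narrows the type for
    # mypy and fails loudly if the events table is unexpectedly missing.
    assert row is not None
    (oldsql,) = row
    return oldsql
```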
import logging from io import StringIO +from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.prepare_database import execute_statements_from_stream -from synapse.storage.types import Cursor logger = logging.getLogger(__name__) -def run_upgrade(*args, **kwargs): - pass - - -def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: # some instances might already have this index, in which case we can skip this if isinstance(database_engine, PostgresEngine): cur.execute( diff --git a/synapse/storage/schema/main/delta/58/11user_id_seq.py b/synapse/storage/schema/main/delta/58/11user_id_seq.py index 4310ec12ce..32f7e0a252 100644 --- a/synapse/storage/schema/main/delta/58/11user_id_seq.py +++ b/synapse/storage/schema/main/delta/58/11user_id_seq.py @@ -16,19 +16,16 @@ Adds a postgres SEQUENCE for generating guest user IDs. """ +from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.registration import ( find_max_generated_user_id_localpart, ) -from synapse.storage.engines import PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if not isinstance(database_engine, PostgresEngine): return next_id = find_max_generated_user_id_localpart(cur) + 1 cur.execute("CREATE SEQUENCE user_id_seq START WITH %s", (next_id,)) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/59/01ignored_user.py b/synapse/storage/schema/main/delta/59/01ignored_user.py index 9e8f35c1d2..c53e2bade2 100644 --- a/synapse/storage/schema/main/delta/59/01ignored_user.py +++ b/synapse/storage/schema/main/delta/59/01ignored_user.py @@ -20,18 +20,14 @@ import logging from io import StringIO from synapse.storage._base import db_to_json +from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.prepare_database import execute_statements_from_stream -from synapse.storage.types import Cursor logger = logging.getLogger(__name__) -def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): - pass - - -def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: logger.info("Creating ignored_users table") execute_statements_from_stream(cur, StringIO(_create_commands)) diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py index f8d7db9f2e..4a06b65888 100644 --- a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py +++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py @@ -16,11 +16,11 @@ This migration handles the process of changing the type of `room_depth.min_depth` to a BIGINT. 
""" +from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -from synapse.storage.types import Cursor -def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if not isinstance(database_engine, PostgresEngine): # this only applies to postgres - sqlite does not distinguish between big and # little ints. @@ -64,7 +64,3 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs (6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth2') """ ) - - -def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): - pass diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py index a2ec4fc26e..9210026dde 100644 --- a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py @@ -18,11 +18,11 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu Triggers cannot be expressed in .sql files, so we have to use a separate file. """ +from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine -from synapse.storage.types import Cursor -def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: # complain if the room_id in partial_state_events doesn't match # that in `events`. We already have a fk constraint which ensures that the event # exists in `events`, so all we have to do is raise if there is a row with a diff --git a/synapse/storage/schema/main/delta/69/01as_txn_seq.py b/synapse/storage/schema/main/delta/69/01as_txn_seq.py index 24bd4b391e..6c112425f2 100644 --- a/synapse/storage/schema/main/delta/69/01as_txn_seq.py +++ b/synapse/storage/schema/main/delta/69/01as_txn_seq.py @@ -17,10 +17,11 @@ Adds a postgres SEQUENCE for generating application service transaction IDs. """ -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): # If we already have some AS TXNs we want to start from the current # maximum value. 
There are two potential places this is stored - the @@ -30,10 +31,12 @@ def run_create(cur, database_engine, *args, **kwargs): cur.execute("SELECT COALESCE(max(txn_id), 0) FROM application_services_txns") row = cur.fetchone() + assert row is not None txn_max = row[0] cur.execute("SELECT COALESCE(max(last_txn), 0) FROM application_services_state") row = cur.fetchone() + assert row is not None last_txn_max = row[0] start_val = max(last_txn_max, txn_max) + 1 diff --git a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py index 55a5d092cc..2ec1830c6f 100644 --- a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py +++ b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py @@ -14,10 +14,11 @@ import json -from synapse.storage.types import Cursor +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine -def run_create(cur: Cursor, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: """Add a bg update to populate the `state_key` and `rejection_reason` columns of `events`""" # we know that any new events will have the columns populated (and that has been @@ -27,7 +28,9 @@ def run_create(cur: Cursor, database_engine, *args, **kwargs): # current min and max stream orderings, since that is guaranteed to include all # the events that were stored before the new columns were added. cur.execute("SELECT MIN(stream_ordering), MAX(stream_ordering) FROM events") - (min_stream_ordering, max_stream_ordering) = cur.fetchone() + row = cur.fetchone() + assert row is not None + (min_stream_ordering, max_stream_ordering) = row if min_stream_ordering is None: # no rows, nothing to do. diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py index b5853d125c..5c3e3584a2 100644 --- a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py @@ -19,9 +19,16 @@ for its completion can be removed. Note the background job must still remain defined in the database class. """ +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine -def run_upgrade(cur, database_engine, *args, **kwargs): +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: cur.execute("SELECT update_name FROM background_updates") rows = cur.fetchall() for row in rows: diff --git a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py index 3de0a709eb..c7ed258e9d 100644 --- a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py +++ b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py @@ -13,11 +13,11 @@ # limitations under the License. 
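The application-service transaction delta above uses the standard trick for retrofitting a Postgres SEQUENCE onto existing data: read the current maxima from every place a transaction ID may be recorded, then start the sequence one past them. A sketch under the same assumptions follows (Postgres only; the sequence name here is illustrative):

```python
from synapse.storage.database import LoggingTransaction


def create_txn_sequence(cur: LoggingTransaction) -> None:
    # Take the max over both places a transaction ID may be stored,
    # mirroring the delta above.
    cur.execute("SELECT COALESCE(max(txn_id), 0) FROM application_services_txns")
    row = cur.fetchone()
    assert row is not None
    txn_max = row[0]

    cur.execute("SELECT COALESCE(max(last_txn), 0) FROM application_services_state")
    row = cur.fetchone()
    assert row is not None
    last_txn_max = row[0]

    start_val = max(last_txn_max, txn_max) + 1
    # CREATE SEQUENCE is DDL; the driver interpolates %s client-side here,
    # as in the delta itself.
    cur.execute("CREATE SEQUENCE as_txn_seq START WITH %s", (start_val,))
```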
import json +from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine -from synapse.storage.types import Cursor -def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None: +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: """ Upgrade the event_search table to use the porter tokenizer if it isn't already @@ -38,6 +38,7 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None: # Re-run the background job to re-populate the event_search table. cur.execute("SELECT MIN(stream_ordering) FROM events") row = cur.fetchone() + assert row is not None min_stream_id = row[0] # If there are not any events, nothing to do. @@ -46,6 +47,7 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None: cur.execute("SELECT MAX(stream_ordering) FROM events") row = cur.fetchone() + assert row is not None max_stream_id = row[0] progress = { diff --git a/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres new file mode 100644 index 0000000000..ceb750a9fa --- /dev/null +++ b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres @@ -0,0 +1,29 @@ +/* Copyright 2022 Beeper + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Each of these is a denormalised copy of `stream_ordering` from the corresponding row in `events` which +-- we use to improve database performance by reducing JOINs. + +-- NOTE: these are set to NOT VALID to prevent locks while adding the column on large existing tables, +-- which will be validated in a later migration. For all new/updated rows the FKEY will be checked.
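-- (Illustrative aside, not part of the patch: a NOT VALID foreign key skips the
-- full-table validation scan, and the long lock it would take, when the constraint
-- is added, while still being enforced for every new or updated row. A follow-up
-- migration can then validate the existing rows with only a lightweight lock,
-- along the lines of:
--
--     ALTER TABLE current_state_events VALIDATE CONSTRAINT event_stream_ordering_fkey;
--
-- repeated for each of the three tables altered below.)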
+ +ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT; +ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID; + +ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT; +ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID; + +ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT; +ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID; diff --git a/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite new file mode 100644 index 0000000000..6f6283fdb7 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite @@ -0,0 +1,23 @@ +/* Copyright 2022 Beeper + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Each of these is a denormalised copy of `stream_ordering` from the corresponding row in `events` which +-- we use to improve database performance by reducing JOINs. + +-- NOTE: sqlite does not support ADD CONSTRAINT so we add the new columns with FK constraint as-is + +ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering); +ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering); +ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering); diff --git a/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql new file mode 100644 index 0000000000..a194f4cece --- /dev/null +++ b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql @@ -0,0 +1,17 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7404, 'delete_e2e_backup_keys_for_deactivated_users', '{}'); \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py new file mode 100644 index 0000000000..2ee2bc9422 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py @@ -0,0 +1,79 @@ +# Copyright 2022 Beeper +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This migration adds triggers to the room membership tables to enforce consistency. +Triggers cannot be expressed in .sql files, so we have to use a separate file. +""" +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine + + +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: + # Complain if the `event_stream_ordering` in membership tables doesn't match + # the `stream_ordering` row with the same `event_id` in `events`. + if isinstance(database_engine, Sqlite3Engine): + for table in ( + "current_state_events", + "local_current_membership", + "room_memberships", + ): + cur.execute( + f""" + CREATE TRIGGER IF NOT EXISTS {table}_bad_event_stream_ordering + BEFORE INSERT ON {table} + FOR EACH ROW + BEGIN + SELECT RAISE(ABORT, 'Incorrect event_stream_ordering in {table}') + WHERE EXISTS ( + SELECT 1 FROM events + WHERE events.event_id = NEW.event_id + AND events.stream_ordering != NEW.event_stream_ordering + ); + END; + """ + ) + elif isinstance(database_engine, PostgresEngine): + cur.execute( + """ + CREATE OR REPLACE FUNCTION check_event_stream_ordering() RETURNS trigger AS $BODY$ + BEGIN + IF EXISTS ( + SELECT 1 FROM events + WHERE events.event_id = NEW.event_id + AND events.stream_ordering != NEW.event_stream_ordering + ) THEN + RAISE EXCEPTION 'Incorrect event_stream_ordering'; + END IF; + RETURN NEW; + END; + $BODY$ LANGUAGE plpgsql; + """ + ) + + for table in ( + "current_state_events", + "local_current_membership", + "room_memberships", + ): + cur.execute( + f""" + CREATE TRIGGER check_event_stream_ordering BEFORE INSERT OR UPDATE ON {table} + FOR EACH ROW + EXECUTE PROCEDURE check_event_stream_ordering() + """ + ) + else: + raise NotImplementedError("Unknown database engine") diff --git a/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql b/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql new file mode 100644 index 0000000000..517a821a56 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql @@ -0,0 +1,53 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- For MSC3970, in addition to the (room_id, user_id, token_id, txn_id) -> event_id mapping for each local event, +-- we also store the (room_id, user_id, device_id, txn_id) -> event_id mapping. +-- +-- This adds a new event_txn_id_device_id table. + +-- A map of recent events persisted with transaction IDs. Used to deduplicate +-- send event requests with the same transaction ID. +-- +-- Note: with MSC3970, transaction IDs are scoped to the +-- room ID/user ID/device ID that was used to make the request. +-- +-- Note: The foreign key constraints are ON DELETE CASCADE, as if we delete the +-- event or device we don't want to try and de-duplicate the event. +CREATE TABLE IF NOT EXISTS event_txn_id_device_id ( + event_id TEXT NOT NULL, + room_id TEXT NOT NULL, + user_id TEXT NOT NULL, + device_id TEXT NOT NULL, + txn_id TEXT NOT NULL, + inserted_ts BIGINT NOT NULL, + FOREIGN KEY (event_id) + REFERENCES events (event_id) ON DELETE CASCADE, + FOREIGN KEY (user_id, device_id) + REFERENCES devices (user_id, device_id) ON DELETE CASCADE +); + +-- This ensures that there is only one mapping per event_id. +CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_device_id_event_id + ON event_txn_id_device_id(event_id); + +-- This ensures that there is only one mapping per (room_id, user_id, device_id, txn_id) tuple. +-- Events are usually looked up using this index. +CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_device_id_txn_id + ON event_txn_id_device_id(room_id, user_id, device_id, txn_id); + +-- This table is cleaned up regularly, removing the oldest entries, hence this index. +CREATE INDEX IF NOT EXISTS event_txn_id_device_id_ts + ON event_txn_id_device_id(inserted_ts); diff --git a/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql b/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql new file mode 100644 index 0000000000..9cd680325a --- /dev/null +++ b/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql @@ -0,0 +1,20 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE profiles ADD COLUMN full_user_id TEXT; + +-- Make sure the column has a unique constraint, mirroring the `profiles_user_id_key` +-- constraint. 
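-- (Illustrative aside: a row in background_updates, as inserted below, asks Synapse
-- to build the index asynchronously after startup rather than inside this
-- migration. On Postgres the registered update ultimately runs DDL along the
-- lines of:
--
--     CREATE UNIQUE INDEX CONCURRENTLY profiles_full_user_id_key_idx
--         ON profiles (full_user_id);
--
-- the exact index name and options are defined by the Python handler registered
-- under 'profiles_full_user_id_key_idx', so treat this as a sketch, not the
-- actual DDL.)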
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7501, 'profiles_full_user_id_key_idx', '{}'); diff --git a/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql b/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql new file mode 100644 index 0000000000..fd231adeef --- /dev/null +++ b/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql @@ -0,0 +1,20 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE user_filters ADD COLUMN full_user_id TEXT; + +-- Add a unique index on the new column, mirroring the `user_filters_unique` unique +-- index. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7502, 'full_users_filters_unique_idx', '{}'); \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql b/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql new file mode 100644 index 0000000000..c4ef81846c --- /dev/null +++ b/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql @@ -0,0 +1,27 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Table containing experimental features and whether they are enabled for a given user +CREATE TABLE per_user_experimental_features ( + -- The User ID to check/set the feature for + user_id TEXT NOT NULL, + -- Contains features to be enabled/disabled + feature TEXT NOT NULL, + -- whether the feature is enabled/disabled for a given user, defaults to disabled + enabled BOOLEAN DEFAULT FALSE, + FOREIGN KEY (user_id) REFERENCES users(name), + PRIMARY KEY (user_id, feature) +); + diff --git a/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql new file mode 100644 index 0000000000..be4b57d86f --- /dev/null +++ b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql @@ -0,0 +1,24 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE room_forgetter_stream_pos ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +INSERT INTO room_forgetter_stream_pos ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; diff --git a/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres new file mode 100644 index 0000000000..3eb226c648 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres new file mode 100644 index 0000000000..ba037daf47 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql new file mode 100644 index 0000000000..12101ab914 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7703, 'populate_full_user_id_profiles', '{}'); \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql new file mode 100644 index 0000000000..1f4d683cac --- /dev/null +++ b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7704, 'populate_full_user_id_user_filters', '{}'); \ No newline at end of file diff --git a/synapse/storage/schema/state/delta/47/state_group_seq.py b/synapse/storage/schema/state/delta/47/state_group_seq.py index 9fd1ccf6f7..42aff50227 100644 --- a/synapse/storage/schema/state/delta/47/state_group_seq.py +++ b/synapse/storage/schema/state/delta/47/state_group_seq.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.engines import PostgresEngine +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -def run_create(cur, database_engine, *args, **kwargs): +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: if isinstance(database_engine, PostgresEngine): # if we already have some state groups, we want to start making new # ones with a higher id. cur.execute("SELECT max(id) FROM state_groups") row = cur.fetchone() + assert row is not None if row[0] is None: start_val = 1 @@ -28,7 +30,3 @@ def run_create(cur, database_engine, *args, **kwargs): start_val = row[0] + 1 cur.execute("CREATE SEQUENCE state_group_id_seq START WITH %s", (start_val,)) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 56a0048539..34ac807530 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -31,14 +31,14 @@ from typing_extensions import Protocol Some very basic protocol definitions for the DB-API2 classes specified in PEP-249 """ -_Parameters = Union[Sequence[Any], Mapping[str, Any]] +SQLQueryParameters = Union[Sequence[Any], Mapping[str, Any]] class Cursor(Protocol): - def execute(self, sql: str, parameters: _Parameters = ...) -> Any: + def execute(self, sql: str, parameters: SQLQueryParameters = ...) -> Any: ... 
- def executemany(self, sql: str, parameters: Sequence[_Parameters]) -> Any: + def executemany(self, sql: str, parameters: Sequence[SQLQueryParameters]) -> Any: ... def fetchone(self) -> Optional[Tuple]: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 5cee9c3194..42baf8ac6b 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -84,7 +84,15 @@ JsonSerializable = object # Collection[str] that does not include str itself; str being a Sequence[str] # is very misleading and results in bugs. +# +# StrCollection is an unordered collection of strings. If ordering is important, +# StrSequence can be used instead. StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]] +# Sequence[str] that does not include str itself; str being a Sequence[str] +# is very misleading and results in bugs. +# +# Unlike StrCollection, StrSequence is an ordered collection of strings. +StrSequence = Union[Tuple[str, ...], List[str]] # Note that this seems to require inheriting *directly* from Interface in order @@ -335,18 +343,35 @@ class EventID(DomainSpecificString): mxid_localpart_allowed_characters = set( "_-./=" + string.ascii_lowercase + string.digits ) +# MSC4009 adds the + to the allowed characters. +# +# TODO If this is accepted, update the SSO code to support this, see the callers +# of map_username_to_mxid_localpart. +extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"} + +# Guest user IDs are purely numeric. +GUEST_USER_ID_PATTERN = re.compile(r"^\d+$") -def contains_invalid_mxid_characters(localpart: str) -> bool: +def contains_invalid_mxid_characters( + localpart: str, use_extended_character_set: bool +) -> bool: """Check for characters not allowed in an mxid or groupid localpart Args: localpart: the localpart to be checked + use_extended_character_set: True to use the extended allowed characters + from MSC4009.
Returns: True if there are any naughty characters """ - return any(c not in mxid_localpart_allowed_characters for c in localpart) + allowed_characters = ( + extended_mxid_localpart_allowed_characters + if use_extended_character_set + else mxid_localpart_allowed_characters + ) + return any(c not in allowed_characters for c in localpart) UPPER_CASE_PATTERN = re.compile(b"[A-Z_]") diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 81df71a0c5..8514a75a1c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -220,7 +220,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.iterable = iterable self.prune_unread_entries = prune_unread_entries - def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: + def __get__( + self, obj: Optional[Any], owner: Optional[Type] + ) -> Callable[..., "defer.Deferred[Any]"]: cache: DeferredCache[CacheKey, Any] = DeferredCache( name=self.name, max_entries=self.max_entries, @@ -232,7 +234,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): get_cache_key = self.cache_key_builder @functools.wraps(self.orig) - def _wrapped(*args: Any, **kwargs: Any) -> Any: + def _wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Any]": # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 5a638c6e9a..e3a54df48b 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -14,17 +14,17 @@ import importlib import importlib.util -import itertools from types import ModuleType -from typing import Any, Iterable, Tuple, Type +from typing import Any, Tuple, Type import jsonschema from synapse.config._base import ConfigError from synapse.config._util import json_error_to_config_error +from synapse.types import StrSequence -def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: +def load_module(provider: dict, config_path: StrSequence) -> Tuple[Type, Any]: """Loads a synapse module with its config Args: @@ -39,9 +39,7 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: modulename = provider.get("module") if not isinstance(modulename, str): - raise ConfigError( - "expected a string", path=itertools.chain(config_path, ("module",)) - ) + raise ConfigError("expected a string", path=tuple(config_path) + ("module",)) # We need to import the module, and then pick the class out of # that, so we split based on the last dot. 
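The "split based on the last dot" comment above is the usual dotted-path convention for module config; stripped of the config plumbing and error handling, the lookup amounts to something like the following sketch (not the exact body of load_module):

    import importlib

    def pick_class(modulename: str) -> type:
        # "some.module.SomeClass" -> import "some.module", then fetch "SomeClass".
        module_name, class_name = modulename.rsplit(".", 1)
        module = importlib.import_module(module_name)
        return getattr(module, class_name)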
@@ -55,19 +53,17 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: try: provider_config = provider_class.parse_config(module_config) except jsonschema.ValidationError as e: - raise json_error_to_config_error( - e, itertools.chain(config_path, ("config",)) - ) + raise json_error_to_config_error(e, tuple(config_path) + ("config",)) except ConfigError as e: raise _wrap_config_error( "Failed to parse config for module %r" % (modulename,), - prefix=itertools.chain(config_path, ("config",)), + prefix=tuple(config_path) + ("config",), e=e, ) except Exception as e: raise ConfigError( "Failed to parse config for module %r" % (modulename,), - path=itertools.chain(config_path, ("config",)), + path=tuple(config_path) + ("config",), ) from e else: provider_config = module_config @@ -92,9 +88,7 @@ def load_python_module(location: str) -> ModuleType: return mod -def _wrap_config_error( - msg: str, prefix: Iterable[str], e: ConfigError -) -> "ConfigError": +def _wrap_config_error(msg: str, prefix: StrSequence, e: ConfigError) -> "ConfigError": """Wrap a relative ConfigError with a new path This is useful when we have a ConfigError with a relative path due to a problem @@ -102,7 +96,7 @@ def _wrap_config_error( """ path = prefix if e.path: - path = itertools.chain(prefix, e.path) + path = tuple(prefix) + tuple(e.path) e1 = ConfigError(msg, path) diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index 1046224f15..3721a1558e 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -22,12 +22,16 @@ def phone_number_to_msisdn(country: str, number: str) -> str: Takes an ISO-3166-1 2 letter country code and phone number and returns an msisdn representing the canonical version of that phone number. + + As an example, if `country` is "GB" and `number` is "7470674927", this + function will return "447470674927". + Args: country: ISO-3166-1 2 letter country code number: Phone number in a national or international format Returns: - The canonical form of the phone number, as an msisdn + The canonical form of the phone number, as an msisdn. Raises: SynapseError if the number could not be parsed. """ diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index f262bf95a0..2ad55ac13e 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -25,10 +25,12 @@ from typing import ( Iterator, List, Mapping, + MutableSet, Optional, Set, Tuple, ) +from weakref import WeakSet from prometheus_client.core import Counter from typing_extensions import ContextManager @@ -86,7 +88,9 @@ queue_wait_timer = Histogram( ) -_rate_limiter_instances: Set["FederationRateLimiter"] = set() +# This must be a `WeakSet`, otherwise we indirectly hold on to entire `HomeServer`s +# during trial test runs and leak a lot of memory. 
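# (Illustrative aside, not part of the patch: a WeakSet keeps an entry only while
# something else holds a strong reference to it, e.g.
#
#     import weakref
#
#     class Limiter:
#         pass
#
#     instances: "weakref.WeakSet[Limiter]" = weakref.WeakSet()
#     limiter = Limiter()
#     instances.add(limiter)
#     del limiter  # once garbage-collected, the entry vanishes from `instances`
#
# so a FederationRateLimiter that is otherwise dead no longer pins the whole
# HomeServer it references.)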
+_rate_limiter_instances: MutableSet["FederationRateLimiter"] = WeakSet() # Protects the _rate_limiter_instances set from concurrent access _rate_limiter_instances_lock = threading.Lock() diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 6c6a9ab4b4..aa6af5ad7b 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -26,13 +26,15 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.api.presence import UserPresenceState from synapse.server import HomeServer -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import Clock from synapse.util.frozenutils import freeze from tests import unittest from tests.events.test_utils import MockEvent +user_id = UserID.from_string("@test_user:test") +user2_id = UserID.from_string("@test_user2:test") user_localpart = "test_user" @@ -46,8 +48,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): invalid_filters: List[JsonDict] = [ # `account_data` must be a dictionary {"account_data": "Hello World"}, - # `event_fields` entries must not contain backslashes - {"event_fields": [r"\\foo"]}, # `event_format` must be "client" or "federation" {"event_format": "other"}, # `not_rooms` must contain valid room IDs @@ -112,10 +112,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): "event_format": "client", "event_fields": ["type", "content", "sender"], }, - # a single backslash should be permitted (though it is debatable whether - # it should be permitted before anything other than `.`, and what that - # actually means) - # # (note that event_fields is implemented in # synapse.events.utils.serialize_event, and so whether this actually works # is tested elsewhere. We just want to check that it is allowed through the @@ -437,7 +433,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): user_filter_json = {"presence": {"senders": ["@foo:bar"]}} filter_id = self.get_success( self.datastore.add_user_filter( - user_localpart=user_localpart, user_filter=user_filter_json + user_id=user_id, user_filter=user_filter_json ) ) presence_states = [ @@ -467,7 +463,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): filter_id = self.get_success( self.datastore.add_user_filter( - user_localpart=user_localpart + "2", user_filter=user_filter_json + user_id=user2_id, user_filter=user_filter_json ) ) presence_states = [ @@ -495,7 +491,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( self.datastore.add_user_filter( - user_localpart=user_localpart, user_filter=user_filter_json + user_id=user_id, user_filter=user_filter_json ) ) event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar") @@ -514,7 +510,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( self.datastore.add_user_filter( - user_localpart=user_localpart, user_filter=user_filter_json + user_id=user_id, user_filter=user_filter_json ) ) event = MockEvent( @@ -598,7 +594,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): filter_id = self.get_success( self.filtering.add_user_filter( - user_localpart=user_localpart, user_filter=user_filter_json + user_id=user_id, user_filter=user_filter_json ) ) @@ -619,7 +615,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): filter_id = self.get_success( self.datastore.add_user_filter( - 
user_localpart=user_localpart, user_filter=user_filter_json + user_id=user_id, user_filter=user_filter_json ) ) diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 2ee343d8a4..6e0413400e 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -38,7 +38,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): def default_config(self) -> JsonDict: conf = super().default_config() - # we're using FederationReaderServer, which uses a SlavedStore, so we + # we're using GenericWorkerServer, which uses a GenericWorkerStore, so we # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. conf["worker_app"] = "yes" diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py index a860eedbcf..9305b758d7 100644 --- a/tests/app/test_phone_stats_home.py +++ b/tests/app/test_phone_stats_home.py @@ -4,7 +4,6 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.util import Clock -from tests import unittest from tests.server import ThreadedMemoryReactorClock from tests.unittest import HomeserverTestCase @@ -12,154 +11,6 @@ FIVE_MINUTES_IN_SECONDS = 300 ONE_DAY_IN_SECONDS = 86400 -class PhoneHomeTestCase(HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets_for_client_rest_resource, - room.register_servlets, - login.register_servlets, - ] - - # Override the retention time for the user_ips table because otherwise it - # gets pruned too aggressively for our R30 test. - @unittest.override_config({"user_ips_max_age": "365d"}) - def test_r30_minimum_usage(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 29 days. The user has now not posted for 29 days. - self.reactor.advance(29 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 30 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. 
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_minimum_usage_using_default_config(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. - - N.B. This test does not override the `user_ips_max_age` config setting, - which defaults to 28 days. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 27 days. The user has now not posted for 27 days. - self.reactor.advance(27 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 28 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. - # (This is because the user_ips table has been pruned, which by default - # only preserves the last 28 days of entries.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_user_must_be_retained_for_at_least_a_month(self) -> None: - """ - Tests that a newly-registered user must be retained for a whole month - before appearing in the R30 statistic, even if they post every day - during that time! - """ - # Register a user and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the user does not contribute to R30 yet. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - for _ in range(30): - # This loop posts a message every day for 30 days - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "I'm still here", tok=access_token) - - # Notice that the user *still* does not contribute to R30! 
- r30_results = self.get_success( - self.hs.get_datastores().main.count_r30_users() - ) - self.assertEqual(r30_results, {"all": 0}) - - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "Still here!", tok=access_token) - - # *Now* the user appears in R30. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - class PhoneHomeR30V2TestCase(HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -363,11 +214,6 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase): r30_results, {"all": 0, "android": 0, "electron": 0, "ios": 0, "web": 0} ) - # Check that this is a situation where old R30 differs: - # old R30 DOES count this as 'retained'. - r30_results = self.get_success(store.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "ios": 1}) - # Now we want to check that the user will still be able to appear in # R30v2 as long as the user performs some other activity between # 30 and 60 days later. diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 7deb923a28..15fce165b6 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -195,11 +195,11 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): MISSING_KEYS = [ # Known user, known device, missing algorithm. - ("@alice:example.org", "DEVICE_1", "signed_curve25519:DDDDHg"), + ("@alice:example.org", "DEVICE_2", "xyz", 1), # Known user, missing device. - ("@alice:example.org", "DEVICE_3", "signed_curve25519:EEEEHg"), + ("@alice:example.org", "DEVICE_3", "signed_curve25519", 1), # Unknown user. - ("@bob:example.org", "DEVICE_4", "signed_curve25519:FFFFHg"), + ("@bob:example.org", "DEVICE_4", "signed_curve25519", 1), ] claimed_keys, missing = self.get_success( @@ -207,9 +207,8 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): self.service, [ # Found devices - ("@alice:example.org", "DEVICE_1", "signed_curve25519:AAAAHg"), - ("@alice:example.org", "DEVICE_1", "signed_curve25519:BBBBHg"), - ("@alice:example.org", "DEVICE_2", "signed_curve25519:CCCCHg"), + ("@alice:example.org", "DEVICE_1", "signed_curve25519", 1), + ("@alice:example.org", "DEVICE_2", "signed_curve25519", 1), ] + MISSING_KEYS, ) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index dee976356f..66753c60c4 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re -from typing import Generator +from typing import Any, Generator from unittest.mock import Mock from twisted.internet import defer @@ -49,15 +49,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -65,15 +63,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -81,17 +77,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_member_is_checked( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.event.type = "m.room.member" self.event.state_key = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -99,17 +93,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -117,25 +109,21 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_regex_alias_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_regex_alias_match(self) -> 
Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -145,10 +133,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -192,7 +178,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_alias_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -213,7 +199,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_multiple_matches( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -223,18 +209,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_interested_in_self( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_interested_in_self(self) -> Generator["defer.Deferred[Any]", object, None]: # make sure invites get through self.service.sender = "@appservice:name" self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) @@ -243,18 +225,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.state_key = self.service.sender self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_member_list_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_member_list_match(self) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. self.store.get_local_users_in_room = simple_async_mock( @@ -265,10 +243,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) diff --git a/tests/config/test_appservice.py b/tests/config/test_appservice.py new file mode 100644 index 0000000000..d2d1a40dfc --- /dev/null +++ b/tests/config/test_appservice.py @@ -0,0 +1,40 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.config.appservice import AppServiceConfig, ConfigError + +from tests.unittest import TestCase + + +class AppServiceConfigTest(TestCase): + def test_invalid_app_service_config_files(self) -> None: + for invalid_value in [ + "foobar", + 1, + None, + True, + False, + {}, + ["foo", "bar", False], + ]: + with self.assertRaises(ConfigError): + AppServiceConfig().read_config( + {"app_service_config_files": invalid_value} + ) + + def test_valid_app_service_config_files(self) -> None: + AppServiceConfig().read_config({"app_service_config_files": []}) + AppServiceConfig().read_config( + {"app_service_config_files": ["/not/a/real/path", "/not/a/real/path/2"]} + ) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 1b9696748f..7c63b2ea4c 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -190,10 +190,23 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_verify_keys( + r = self.hs.get_datastores().main.store_server_keys_json( "server9", - int(time.time() * 1000), - [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], + get_key_id(key1), + from_server="test", + ts_now_ms=int(time.time() * 1000), + ts_expires_ms=1000, + # The entire response gets signed & stored, just include the bits we + # care about. + key_json_bytes=canonicaljson.encode_canonical_json( + { + "verify_keys": { + get_key_id(key1): { + "key": encode_verify_key_base64(get_verify_key(key1)) + } + } + } + ), ) self.get_success(r) @@ -280,45 +293,26 @@ class KeyringTestCase(unittest.HomeserverTestCase): mock_fetcher = Mock() mock_fetcher.get_keys = Mock(return_value=make_awaitable({})) - kr = keyring.Keyring( - self.hs, key_fetchers=(StoreKeyFetcher(self.hs), mock_fetcher) - ) - key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_verify_keys( + r = self.hs.get_datastores().main.store_server_signature_keys( "server9", int(time.time() * 1000), # None is not a valid value in FetchKeyResult, but we're abusing this # API to insert null values into the database. The nulls get converted - # to 0 when fetched in KeyStore.get_server_verify_keys. - [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))], # type: ignore[arg-type] + # to 0 when fetched in KeyStore.get_server_signature_keys. + {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), None)}, # type: ignore[arg-type] ) self.get_success(r) json1: JsonDict = {} signedjson.sign.sign_json(json1, "server9", key1) - # should fail immediately on an unsigned object - d = kr.verify_json_for_server("server9", {}, 0) - self.get_failure(d, SynapseError) - - # should fail on a signed object with a non-zero minimum_valid_until_ms, - # as it tries to refetch the keys and fails. - d = kr.verify_json_for_server("server9", json1, 500) - self.get_failure(d, SynapseError) - - # We expect the keyring tried to refetch the key once. 
- mock_fetcher.get_keys.assert_called_once_with( - "server9", [get_key_id(key1)], 500 - ) - # should succeed on a signed object with a 0 minimum_valid_until_ms - d = kr.verify_json_for_server( - "server9", - json1, - 0, + d = self.hs.get_datastores().main.get_server_signature_keys( + [("server9", get_key_id(key1))] ) - self.get_success(d) + result = self.get_success(d) + self.assertEquals(result[("server9", get_key_id(key1))].valid_until_ts, 0) def test_verify_json_dedupes_key_requests(self) -> None: """Two requests for the same key should be deduped.""" @@ -464,7 +458,9 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( - self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) + self.hs.get_datastores().main.get_server_keys_json_for_remote( + [lookup_triplet] + ) ) res_keys = key_json[lookup_triplet] self.assertEqual(len(res_keys), 1) @@ -582,7 +578,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( - self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) + self.hs.get_datastores().main.get_server_keys_json_for_remote( + [lookup_triplet] + ) ) res_keys = key_json[lookup_triplet] self.assertEqual(len(res_keys), 1) @@ -703,7 +701,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( - self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) + self.hs.get_datastores().main.get_server_keys_json_for_remote( + [lookup_triplet] + ) ) res_keys = key_json[lookup_triplet] self.assertEqual(len(res_keys), 1) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 4174a237ec..c9a610db9a 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -15,12 +15,16 @@ import unittest as stdlib_unittest from typing import Any, List, Mapping, Optional +import attr +from parameterized import parameterized + from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import ( PowerLevelsContent, SerializeEventConfig, + _split_field, copy_and_fixup_power_levels_contents, maybe_upsert_event_field, prune_event, @@ -143,6 +147,13 @@ class PruneEventTestCase(stdlib_unittest.TestCase): room_version=RoomVersions.MSC2176, ) + # As of MSC3989 we now redact the origin key. 
+ self.run_test( + {"type": "A", "origin": "example.com"}, + {"type": "A", "content": {}, "signatures": {}, "unsigned": {}}, + room_version=RoomVersions.MSC3989, + ) + def test_unsigned(self) -> None: """Ensure that unsigned properties get stripped (except age_ts and replaces_state).""" self.run_test( @@ -311,7 +322,11 @@ class PruneEventTestCase(stdlib_unittest.TestCase): """Redaction events have no special behaviour until MSC2174/MSC2176.""" self.run_test( - {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}}, + { + "type": "m.room.redaction", + "content": {"redacts": "$test2:domain"}, + "redacts": "$test2:domain", + }, { "type": "m.room.redaction", "content": {}, @@ -323,7 +338,11 @@ class PruneEventTestCase(stdlib_unittest.TestCase): # After MSC2174, redaction events keep the redacts content key. self.run_test( - {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}}, + { + "type": "m.room.redaction", + "content": {"redacts": "$test2:domain"}, + "redacts": "$test2:domain", + }, { "type": "m.room.redaction", "content": {"redacts": "$test2:domain"}, @@ -377,7 +396,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): ) def test_member(self) -> None: - """Member events have changed behavior starting with MSC3375.""" + """Member events have changed behavior in MSC3375 and MSC3821.""" self.run_test( { "type": "m.room.member", @@ -420,6 +439,167 @@ class PruneEventTestCase(stdlib_unittest.TestCase): room_version=RoomVersions.V9, ) + # After MSC3821, the signed key under third_party_invite is protected + # from redaction. + THIRD_PARTY_INVITE = { + "display_name": "alice", + "signed": { + "mxid": "@alice:example.org", + "signatures": { + "magic.forest": { + "ed25519:3": "fQpGIW1Snz+pwLZu6sTy2aHy/DYWWTspTJRPyNp0PKkymfIsNffysMl6ObMMFdIJhk6g6pwlIqZ54rxo8SLmAg" + } + }, + "token": "abc123", + }, + } + + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": THIRD_PARTY_INVITE, + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": {"signed": THIRD_PARTY_INVITE["signed"]}, + }, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + + # Ensure this doesn't break if an invalid field is sent. + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": {}, + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": {"membership": "invite", "third_party_invite": {}}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": "stripped", + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": {"membership": "invite"}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + + def test_relations(self) -> None: + """Event relations get redacted until MSC3389.""" + # Normally the m._relates_to field is redacted. + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + "other": "stripped", + }, + }, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.V10, + ) + + # Create a new room version. 
+ msc3389_room_ver = attr.evolve( + RoomVersions.V10, msc3389_relation_redactions=True + ) + + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + "other": "stripped", + }, + }, + }, + { + "type": "m.room.message", + "content": { + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + }, + }, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + + # If the field is not an object, redact it. + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": "stripped", + }, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + + # If the m.relates_to property would be empty, redact it. + self.run_test( + { + "type": "m.room.message", + "content": {"body": "foo", "m.relates_to": {"foo": "stripped"}}, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + class SerializeEventTestCase(stdlib_unittest.TestCase): def serialize(self, ev: EventBase, fields: Optional[List[str]]) -> JsonDict: @@ -616,3 +796,40 @@ class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase): def test_invalid_nesting_raises_type_error(self) -> None: with self.assertRaises(TypeError): copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) # type: ignore[dict-item] + + +class SplitFieldTestCase(stdlib_unittest.TestCase): + @parameterized.expand( + [ + # A field with no dots. + ["m", ["m"]], + # Simple dotted fields. + ["m.foo", ["m", "foo"]], + ["m.foo.bar", ["m", "foo", "bar"]], + # Backslash is used as an escape character. + [r"m\.foo", ["m.foo"]], + [r"m\\.foo", ["m\\", "foo"]], + [r"m\\\.foo", [r"m\.foo"]], + [r"m\\\\.foo", ["m\\\\", "foo"]], + [r"m\foo", [r"m\foo"]], + [r"m\\foo", [r"m\foo"]], + [r"m\\\foo", [r"m\\foo"]], + [r"m\\\\foo", [r"m\\foo"]], + # Ensure that escapes at the end don't cause issues. + ["m.foo\\", ["m", "foo\\"]], + ["m.foo\\", ["m", "foo\\"]], + [r"m.foo\.", ["m", "foo."]], + [r"m.foo\\.", ["m", "foo\\", ""]], + [r"m.foo\\\.", ["m", r"foo\."]], + # Empty parts (corresponding to properties which are an empty string) are allowed. + [".m", ["", "m"]], + ["..m", ["", "", "m"]], + ["m.", ["m", ""]], + ["m..", ["m", "", ""]], + ["m..foo", ["m", "", "foo"]], + # Invalid escape sequences. 
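+            # A backslash only escapes "." or "\"; before any other character
+            # it is kept literally (see the reconstruction sketch at the end
+            # of this diff).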
+ [r"\m", [r"\m"]], + ] + ) + def test_split_field(self, input: str, expected: str) -> None: + self.assertEqual(_split_field(input), expected) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 33af8770fd..129d7cfd93 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -75,7 +75,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) @@ -106,7 +106,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) @@ -143,7 +143,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) + fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) @@ -200,7 +200,7 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) @@ -230,7 +230,7 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 6c7738d810..5c850d1843 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -63,7 +63,7 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): class ServerACLsTestCase(unittest.TestCase): - def test_blacklisted_server(self) -> None: + def test_blocked_server(self) -> None: e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) logging.info("ACL event: %s", e.content) diff --git a/tests/federation/transport/test_client.py 
b/tests/federation/transport/test_client.py index 3d61b1e8a9..93e5c85a27 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -86,18 +86,7 @@ class SendJoinParserTestCase(TestCase): return parsed_response.members_omitted self.assertTrue(parse({"members_omitted": True})) - self.assertTrue(parse({"org.matrix.msc3706.partial_state": True})) - self.assertFalse(parse({"members_omitted": False})) - self.assertFalse(parse({"org.matrix.msc3706.partial_state": False})) - - # If there's a conflict, the stable field wins. - self.assertTrue( - parse({"members_omitted": True, "org.matrix.msc3706.partial_state": False}) - ) - self.assertFalse( - parse({"members_omitted": False, "org.matrix.msc3706.partial_state": True}) - ) def test_servers_in_room(self) -> None: """Check that the servers_in_room field is correctly parsed""" @@ -113,28 +102,10 @@ class SendJoinParserTestCase(TestCase): parsed_response = parser.finish() return parsed_response.servers_in_room - self.assertEqual( - parse({"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}), - ["hs1", "hs2"], - ) self.assertEqual(parse({"servers_in_room": ["example.com"]}), ["example.com"]) - # If both are provided, the stable identifier should win - self.assertEqual( - parse( - { - "org.matrix.msc3706.servers_in_room": ["old"], - "servers_in_room": ["new"], - } - ), - ["new"], - ) - - # And lastly, we should be able to tell if neither field was present. - self.assertEqual( - parse({}), - None, - ) + # We should be able to tell the field is not present. + self.assertEqual(parse({}), None) def test_errors_closing_coroutines(self) -> None: """Check we close all coroutines, even if closing the first raises an Exception. @@ -143,7 +114,7 @@ class SendJoinParserTestCase(TestCase): assertions about its attributes or type. """ parser = SendJoinParser(RoomVersions.V1, False) - response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]} + response = {"servers_in_room": ["hs1", "hs2"]} serialisation = json.dumps(response).encode() # Mock the coroutines managed by this parser. diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index ce7525e29c..ee48f9e546 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -15,15 +15,22 @@ # limitations under the License. 
from typing import Optional +from unittest import mock from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import NotFoundError, SynapseError +from synapse.appservice import ApplicationService from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler from synapse.server import HomeServer +from synapse.storage.databases.main.appservice import _make_exclusive_regex +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest +from tests.test_utils import make_awaitable +from tests.unittest import override_config user1 = "@boris:aaa" user2 = "@theresa:bbb" @@ -31,7 +38,12 @@ user2 = "@theresa:bbb" class DeviceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + self.appservice_api = mock.Mock() + hs = self.setup_test_homeserver( + "server", + federation_http_client=None, + application_service_api=self.appservice_api, + ) handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler @@ -265,6 +277,127 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) self.reactor.advance(1000) + @override_config({"experimental_features": {"msc3984_appservice_key_query": True}}) + def test_on_federation_query_user_devices_appservice(self) -> None: + """Test that querying of appservices for keys overrides responses from the database.""" + local_user = "@boris:" + self.hs.hostname + device_1 = "abc" + device_2 = "def" + device_3 = "ghi" + + # There are 3 devices: + # + # 1. One which is uploaded to the homeserver. + # 2. One which is uploaded to the homeserver, but a newer copy is returned + # by the appservice. + # 3. One which is only returned by the appservice. + device_key_1: JsonDict = { + "user_id": local_user, + "device_id": device_1, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:abc": "base64+ed25519+key", + "curve25519:abc": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:abc": "base64+signature"}}, + } + device_key_2a: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:def": "base64+ed25519+key", + "curve25519:def": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:def": "base64+signature"}}, + } + + device_key_2b: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + # The device ID is the same (above), but the keys are different. + "keys": { + "ed25519:xyz": "base64+ed25519+key", + "curve25519:xyz": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:xyz": "base64+signature"}}, + } + device_key_3: JsonDict = { + "user_id": local_user, + "device_id": device_3, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:jkl": "base64+ed25519+key", + "curve25519:jkl": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:jkl": "base64+signature"}}, + } + + # Upload keys for devices 1 & 2a. 
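+        # Devices 2b and 3 are deliberately not uploaded here; they only
+        # exist in the appservice response configured further down.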
+ e2e_keys_handler = self.hs.get_e2e_keys_handler() + self.get_success( + e2e_keys_handler.upload_keys_for_user( + local_user, device_1, {"device_keys": device_key_1} + ) + ) + self.get_success( + e2e_keys_handler.upload_keys_for_user( + local_user, device_2, {"device_keys": device_key_2a} + ) + ) + + # Inject an appservice interested in this user. + appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]}, + # Note: this user does not have to match the regex above + sender="@as_main:test", + ) + self.hs.get_datastores().main.services_cache = [appservice] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [appservice] + ) + + # Setup a response. + self.appservice_api.query_keys.return_value = make_awaitable( + { + "device_keys": { + local_user: {device_2: device_key_2b, device_3: device_key_3} + } + } + ) + + # Request all devices. + res = self.get_success( + self.handler.on_federation_query_user_devices(local_user) + ) + self.assertIn("devices", res) + res_devices = res["devices"] + for device in res_devices: + device["keys"].pop("unsigned", None) + self.assertEqual( + res_devices, + [ + {"device_id": device_1, "keys": device_key_1}, + {"device_id": device_2, "keys": device_key_2b}, + {"device_id": device_3, "keys": device_key_3}, + ], + ) + class DehydrationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 013b9ee550..72d0584061 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -160,7 +160,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res2 = self.get_success( self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -203,7 +205,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # key claim_res = self.get_success( self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -220,7 +224,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # claiming an OTK again should return the same fallback key claim_res = self.get_success( self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -267,7 +273,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -277,7 +285,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -296,7 +306,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( 
self.handler.claim_one_time_keys( - {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -304,6 +316,75 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) + def test_fallback_key_always_returned(self) -> None: + local_user = "@boris:" + self.hs.hostname + device_id = "xyz" + fallback_key = {"alg1:k1": "fallback_key1"} + otk = {"alg1:k2": "key2"} + + # we shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(res, []) + + # Upload a OTK & fallback key. + self.get_success( + self.handler.upload_keys_for_user( + local_user, + device_id, + {"one_time_keys": otk, "fallback_keys": fallback_key}, + ) + ) + + # we should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # Claiming an OTK and requesting to always return the fallback key should + # return both. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": {local_user: {device_id: {**fallback_key, **otk}}}, + }, + ) + + # This should not mark the key as used. + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # Claiming an OTK again should return only the fallback key. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # And mark it as used. + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id) + ) + self.assertEqual(fallback_res, []) + def test_replace_master_key(self) -> None: """uploading a new signing key should make the old signing key unavailable""" local_user = "@boris:" + self.hs.hostname @@ -971,7 +1052,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # Setup a response, but only for device 2. self.appservice_api.claim_client_keys.return_value = make_awaitable( - ({local_user: {device_id_2: otk}}, [(local_user, device_id_1, "alg1")]) + ({local_user: {device_id_2: otk}}, [(local_user, device_id_1, "alg1", 1)]) ) # we shouldn't have any unused fallback keys yet @@ -998,12 +1079,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # query the fallback keys. 
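         # claim_one_time_keys now takes a mapping of user ID to device ID to
         # algorithm to count, plus an explicit always_include_fallback_keys
         # flag, rather than the old {"one_time_keys": ...} wrapper.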
claim_res = self.get_success( self.handler.claim_one_time_keys( - { - "one_time_keys": { - local_user: {device_id_1: "alg1", device_id_2: "alg1"} - } - }, + {local_user: {device_id_1: {"alg1": 1}, device_id_2: {"alg1": 1}}}, timeout=None, + always_include_fallback_keys=False, ) ) self.assertEqual( @@ -1016,6 +1094,153 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) + @override_config({"experimental_features": {"msc3983_appservice_otk_claims": True}}) + def test_query_appservice_with_fallback(self) -> None: + local_user = "@boris:" + self.hs.hostname + device_id_1 = "xyz" + fallback_key = {"alg1:k1": {"desc": "fallback_key1", "fallback": True}} + otk = {"alg1:k2": {"desc": "key2"}} + as_fallback_key = {"alg1:k3": {"desc": "fallback_key3", "fallback": True}} + as_otk = {"alg1:k4": {"desc": "key4"}} + + # Inject an appservice interested in this user. + appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]}, + # Note: this user does not have to match the regex above + sender="@as_main:test", + ) + self.hs.get_datastores().main.services_cache = [appservice] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [appservice] + ) + + # Setup a response. + self.appservice_api.claim_client_keys.return_value = make_awaitable( + ({local_user: {device_id_1: {**as_otk, **as_fallback_key}}}, []) + ) + + # Claim OTKs, which will ask the appservice and do nothing else. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id_1: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": { + local_user: {device_id_1: {**as_otk, **as_fallback_key}} + }, + }, + ) + + # Now upload a fallback key. + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(res, []) + + self.get_success( + self.handler.upload_keys_for_user( + local_user, + device_id_1, + {"fallback_keys": fallback_key}, + ) + ) + + # we should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # The appservice will return only the OTK. + self.appservice_api.claim_client_keys.return_value = make_awaitable( + ({local_user: {device_id_1: as_otk}}, []) + ) + + # Claim OTKs, which should return the OTK from the appservice and the + # uploaded fallback key. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id_1: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": { + local_user: {device_id_1: {**as_otk, **fallback_key}} + }, + }, + ) + + # But the fallback key should not be marked as used. + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # Now upload a OTK. + self.get_success( + self.handler.upload_keys_for_user( + local_user, + device_id_1, + {"one_time_keys": otk}, + ) + ) + + # Claim OTKs, which will return information only from the database. 
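+        # The database now holds both an OTK and an unused fallback key for
+        # this device, so nothing from the appservice response is needed.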
+ claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id_1: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": {local_user: {device_id_1: {**otk, **fallback_key}}}, + }, + ) + + # But the fallback key should not be marked as used. + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # Finally, return only the fallback key from the appservice. + self.appservice_api.claim_client_keys.return_value = make_awaitable( + ({local_user: {device_id_1: as_fallback_key}}, []) + ) + + # Claim OTKs, which will return only the fallback key from the database. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id_1: {"alg1": 1}}}, + timeout=None, + always_include_fallback_keys=True, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": {local_user: {device_id_1: as_fallback_key}}, + }, + ) + @override_config({"experimental_features": {"msc3984_appservice_key_query": True}}) def test_query_local_devices_appservice(self) -> None: """Test that querying of appservices for keys overrides responses from the database.""" diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index aa91bc0a3d..394006f5f3 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -18,13 +18,17 @@ from http import HTTPStatus from typing import Any, Dict, List, Optional, Type, Union from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.handlers.account import AccountHandler from synapse.module_api import ModuleApi from synapse.rest.client import account, devices, login, logout, register +from synapse.server import HomeServer from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -162,10 +166,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): CALLBACK_USERNAME = "get_username_for_registration" CALLBACK_DISPLAYNAME = "get_displayname_for_registration" - def setUp(self) -> None: + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: # we use a global mock device, so make sure we are starting with a clean slate mock_password_provider.reset_mock() - super().setUp() + + # The mock password provider doesn't register the users, so ensure they + # are registered first. 
+ self.register_user("u", "not-the-tested-password") + self.register_user("user", "not-the-tested-password") @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_progiver_login_legacy(self) -> None: @@ -185,22 +195,12 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # login with mxid should work too - channel = self._send_password_login("@u:bz", "p") + channel = self._send_password_login("@u:test", "p") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@u:bz", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") + self.assertEqual("@u:test", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") mock_password_provider.reset_mock() - # try a weird username / pass. Honestly it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with( - "@ USER🙂NAME :test", " pASS😢word " - ) - @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_provider_ui_auth_legacy(self) -> None: self.password_only_auth_provider_ui_auth_test_body() @@ -208,10 +208,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): def password_only_auth_provider_ui_auth_test_body(self) -> None: """UI Auth should delegate correctly to the password provider""" - # create the user, otherwise access doesn't work - module_api = self.hs.get_module_api() - self.get_success(module_api.register_user("u")) - # log in twice, to get two devices mock_password_provider.check_password.return_value = make_awaitable(True) tok1 = self.login("u", "p") @@ -401,29 +397,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) mock_password_provider.reset_mock() - # try a weird username. Again, it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - mock_password_provider.check_auth.return_value = make_awaitable( - ("@ MALFORMED! :bz", None) - ) - channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ MALFORMED! 
:bz", channel.json_body["user_id"]) - mock_password_provider.check_auth.assert_called_once_with( - " USER🙂NAME ", "test.login_type", {"test_field": " abc "} - ) - @override_config(legacy_providers_config(LegacyCustomAuthProvider)) def test_custom_auth_provider_ui_auth_legacy(self) -> None: self.custom_auth_provider_ui_auth_test_body() @@ -465,7 +448,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # right params, but authing as the wrong user mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) body["auth"]["test_field"] = "foo" channel = self._delete_device(tok1, "dev2", body) @@ -498,11 +481,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): callback = Mock(return_value=make_awaitable(None)) mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", callback) + ("@user:test", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) @@ -512,7 +495,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): call_args, call_kwargs = callback.call_args # should be one positional arg self.assertEqual(len(call_args), 1) - self.assertEqual(call_args[0]["user_id"], "@user:bz") + self.assertEqual(call_args[0]["user_id"], "@user:test") for p in ["user_id", "access_token", "device_id", "home_server"]: self.assertIn(p, call_args[0]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 7c174782da..64a9a22afe 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -66,9 +66,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.handler = hs.get_profile_handler() def test_get_my_name(self) -> None: - self.get_success( - self.store.set_profile_displayname(self.frank.localpart, "Frank") - ) + self.get_success(self.store.set_profile_displayname(self.frank, "Frank")) displayname = self.get_success(self.handler.get_displayname(self.frank)) @@ -121,9 +119,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.hs.config.registration.enable_set_displayname = False # Setting displayname for the first time is allowed - self.get_success( - self.store.set_profile_displayname(self.frank.localpart, "Frank") - ) + self.get_success(self.store.set_profile_displayname(self.frank, "Frank")) self.assertEqual( ( @@ -166,8 +162,14 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) def test_incoming_fed_query(self) -> None: - self.get_success(self.store.create_profile("caroline")) - self.get_success(self.store.set_profile_displayname("caroline", "Caroline")) + self.get_success( + self.store.create_profile(UserID.from_string("@caroline:test")) + ) + self.get_success( + self.store.set_profile_displayname( + UserID.from_string("@caroline:test"), "Caroline" + ) + ) response = self.get_success( self.query_handlers["profile"]( @@ -183,9 +185,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): def test_get_my_avatar(self) -> None: self.get_success( - self.store.set_profile_avatar_url( - self.frank.localpart, "http://my.server/me.png" - ) + self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png") ) avatar_url = self.get_success(self.handler.get_avatar_url(self.frank)) @@ -237,9 +237,7 @@ class 
ProfileTestCase(unittest.HomeserverTestCase):
         # Setting displayname for the first time is allowed
         self.get_success(
-            self.store.set_profile_avatar_url(
-                self.frank.localpart, "http://my.server/me.png"
-            )
+            self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
         )
         self.assertEqual(
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index aff1ec4758..73822b07a5 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -586,6 +586,19 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         d = self.store.is_support_user(user_id)
         self.assertFalse(self.get_success(d))
+    def test_invalid_user_id(self) -> None:
+        invalid_user_id = "+abcd"
+        self.get_failure(
+            self.handler.register_user(localpart=invalid_user_id), SynapseError
+        )
+
+    @override_config({"experimental_features": {"msc4009_e164_mxids": True}})
+    def test_extended_user_ids(self) -> None:
+        """+ should be allowed according to MSC4009."""
+        valid_user_id = "+1234"
+        user_id = self.get_success(self.handler.register_user(localpart=valid_user_id))
+        self.assertEqual(user_id, valid_user_id)
+
     def test_invalid_user_id_length(self) -> None:
         invalid_user_id = "x" * 256
         self.get_failure(
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 6a38893b68..a444d822cd 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -333,6 +333,17 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
             self.get_success(self.store.is_locally_forgotten_room(self.room_id))
         )
+    @override_config({"forget_rooms_on_leave": True})
+    def test_leave_and_auto_forget(self) -> None:
+        """Tests the `forget_rooms_on_leave` config option."""
+        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+        # alice is not the last room member that leaves and forgets the room
+        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+        self.assertTrue(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
+
     def test_leave_and_forget_last_user(self) -> None:
         """Tests that forget a room is successfully when the last user has left the room."""
diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py
index d6f43a98fc..b9ffdb4ced 100644
--- a/tests/handlers/test_sso.py
+++ b/tests/handlers/test_sso.py
@@ -31,11 +31,11 @@ class TestSSOHandler(unittest.HomeserverTestCase):
         self.http_client.get_file.side_effect = mock_get_file
         self.http_client.user_agent = b"Synapse Test"
         hs = self.setup_test_homeserver(
-            proxied_blacklisted_http_client=self.http_client
+            proxied_blocklisted_http_client=self.http_client
         )
         return hs
-    async def test_set_avatar(self) -> None:
+    def test_set_avatar(self) -> None:
         """Tests successfully setting the avatar of a newly created user"""
         handler = self.hs.get_sso_handler()
@@ -54,7 +54,7 @@ class TestSSOHandler(unittest.HomeserverTestCase):
         self.assertIsNot(profile["avatar_url"], None)
     @unittest.override_config({"max_avatar_size": 1})
-    async def test_set_avatar_too_big_image(self) -> None:
+    def test_set_avatar_too_big_image(self) -> None:
         """Tests that saving an avatar fails when it is too big"""
         handler = self.hs.get_sso_handler()
@@ -66,7 +66,7 @@ class TestSSOHandler(unittest.HomeserverTestCase):
         )
     @unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]})
-    async def test_set_avatar_incorrect_mime_type(self) -> None:
+    def test_set_avatar_incorrect_mime_type(self) -> None:
         """Tests that saving an avatar fails when its mime type is not
allowed""" handler = self.hs.get_sso_handler() @@ -77,7 +77,7 @@ class TestSSOHandler(unittest.HomeserverTestCase): self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) ) - async def test_skip_saving_avatar_when_not_changed(self) -> None: + def test_skip_saving_avatar_when_not_changed(self) -> None: """Tests whether saving of avatar correctly skips if the avatar hasn't changed""" handler = self.hs.get_sso_handler() diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index da4d240826..15a7dc6818 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -792,7 +792,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): return False # Configure a spam checker that does not filter any users. - spam_checker = self.hs.get_spam_checker() + spam_checker = self.hs.get_module_api_callbacks().spam_checker spam_checker._check_username_for_spam_callbacks = [allow_all] # The results do not change: diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index eb7f53fee5..105b4caefa 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -269,8 +269,8 @@ class MatrixFederationAgentTests(unittest.TestCase): reactor=cast(ISynapseReactor, self.reactor), tls_client_options_factory=self.tls_factory, user_agent=b"test-agent", # Note that this is unused since _well_known_resolver is provided. - ip_whitelist=IPSet(), - ip_blacklist=IPSet(), + ip_allowlist=IPSet(), + ip_blocklist=IPSet(), _srv_resolver=self.mock_resolver, _well_known_resolver=self.well_known_resolver, ) @@ -997,8 +997,8 @@ class MatrixFederationAgentTests(unittest.TestCase): reactor=self.reactor, tls_client_options_factory=tls_factory, user_agent=b"test-agent", # This is unused since _well_known_resolver is passed below. 
- ip_whitelist=IPSet(), - ip_blacklist=IPSet(), + ip_allowlist=IPSet(), + ip_blocklist=IPSet(), _srv_resolver=self.mock_resolver, _well_known_resolver=WellKnownResolver( cast(ISynapseReactor, self.reactor), diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 57b6a84e23..a05b9f17a6 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -27,8 +27,8 @@ from twisted.web.iweb import UNKNOWN_LENGTH from synapse.api.errors import SynapseError from synapse.http.client import ( - BlacklistingAgentWrapper, - BlacklistingReactorWrapper, + BlocklistingAgentWrapper, + BlocklistingReactorWrapper, BodyExceededMaxSize, _DiscardBodyWithMaxSizeProtocol, read_body_with_max_size, @@ -140,7 +140,7 @@ class ReadBodyWithMaxSizeTests(TestCase): self.assertEqual(result.getvalue(), b"") -class BlacklistingAgentTest(TestCase): +class BlocklistingAgentTest(TestCase): def setUp(self) -> None: self.reactor, self.clock = get_clock() @@ -157,16 +157,16 @@ class BlacklistingAgentTest(TestCase): self.reactor.lookups[domain.decode()] = ip.decode() self.reactor.lookups[ip.decode()] = ip.decode() - self.ip_whitelist = IPSet([self.allowed_ip.decode()]) - self.ip_blacklist = IPSet(["5.0.0.0/8"]) + self.ip_allowlist = IPSet([self.allowed_ip.decode()]) + self.ip_blocklist = IPSet(["5.0.0.0/8"]) def test_reactor(self) -> None: - """Apply the blacklisting reactor and ensure it properly blocks connections to particular domains and IPs.""" + """Apply the blocklisting reactor and ensure it properly blocks connections to particular domains and IPs.""" agent = Agent( - BlacklistingReactorWrapper( + BlocklistingReactorWrapper( self.reactor, - ip_whitelist=self.ip_whitelist, - ip_blacklist=self.ip_blacklist, + ip_allowlist=self.ip_allowlist, + ip_blocklist=self.ip_blocklist, ), ) @@ -207,11 +207,11 @@ class BlacklistingAgentTest(TestCase): self.assertEqual(response.code, 200) def test_agent(self) -> None: - """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs.""" - agent = BlacklistingAgentWrapper( + """Apply the blocklisting agent and ensure it properly blocks connections to particular IPs.""" + agent = BlocklistingAgentWrapper( Agent(self.reactor), - ip_blacklist=self.ip_blacklist, - ip_whitelist=self.ip_whitelist, + ip_blocklist=self.ip_blocklist, + ip_allowlist=self.ip_allowlist, ) # The unsafe IPs should be rejected. 
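The blacklist-to-blocklist renames above are mechanical, but for orientation, here is a minimal usage sketch of the renamed wrapper, mirroring the constructor calls exercised in these tests. The request call is the standard twisted.web.client.Agent API, and the IP ranges are illustrative rather than taken from the fixtures.

from netaddr import IPSet
from twisted.internet import reactor
from twisted.web.client import Agent

from synapse.http.client import BlocklistingAgentWrapper

# Refuse connections to blocklisted addresses unless the address is also
# present in the allowlist.
agent = BlocklistingAgentWrapper(
    Agent(reactor),
    ip_blocklist=IPSet(["5.0.0.0/8"]),
    ip_allowlist=IPSet(["5.1.1.1"]),
)
d = agent.request(b"GET", b"http://5.1.1.1:8080/")  # allowed via the allowlist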
diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index fdd22a8e94..0dfc03ce50 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -26,7 +26,7 @@ from twisted.web.http import HTTPChannel from synapse.api.errors import RequestSendFailed from synapse.http.matrixfederationclient import ( - JsonParser, + ByteParser, MatrixFederationHttpClient, MatrixFederationRequest, ) @@ -231,11 +231,11 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived) - def test_client_ip_range_blacklist(self) -> None: - """Ensure that Synapse does not try to connect to blacklisted IPs""" + def test_client_ip_range_blocklist(self) -> None: + """Ensure that Synapse does not try to connect to blocked IPs""" - # Set up the ip_range blacklist - self.hs.config.server.federation_ip_range_blacklist = IPSet( + # Set up the ip_range blocklist + self.hs.config.server.federation_ip_range_blocklist = IPSet( ["127.0.0.0/8", "fe80::/64"] ) self.reactor.lookups["internal"] = "127.0.0.1" @@ -243,7 +243,7 @@ class FederationClientTests(HomeserverTestCase): self.reactor.lookups["fine"] = "10.20.30.40" cl = MatrixFederationHttpClient(self.hs, None) - # Try making a GET request to a blacklisted IPv4 address + # Try making a GET request to a blocked IPv4 address # ------------------------------------------------------ # Make the request d = defer.ensureDeferred(cl.get_json("internal:8008", "foo/bar", timeout=10000)) @@ -261,7 +261,7 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, DNSLookupError) - # Try making a POST request to a blacklisted IPv6 address + # Try making a POST request to a blocked IPv6 address # ------------------------------------------------------- # Make the request d = defer.ensureDeferred( @@ -278,11 +278,11 @@ class FederationClientTests(HomeserverTestCase): clients = self.reactor.tcpClients self.assertEqual(len(clients), 0) - # Check that it was due to a blacklisted DNS lookup + # Check that it was due to a blocked DNS lookup f = self.failureResultOf(d, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, DNSLookupError) - # Try making a GET request to a non-blacklisted IPv4 address + # Try making a GET request to an allowed IPv4 address # ---------------------------------------------------------- # Make the request d = defer.ensureDeferred(cl.post_json("fine:8008", "foo/bar", timeout=10000)) @@ -618,9 +618,9 @@ class FederationClientTests(HomeserverTestCase): while not test_d.called: protocol.dataReceived(b"a" * chunk_size) sent += chunk_size - self.assertLessEqual(sent, JsonParser.MAX_RESPONSE_SIZE) + self.assertLessEqual(sent, ByteParser.MAX_RESPONSE_SIZE) - self.assertEqual(sent, JsonParser.MAX_RESPONSE_SIZE) + self.assertEqual(sent, ByteParser.MAX_RESPONSE_SIZE) f = self.failureResultOf(test_d) self.assertIsInstance(f.value, RequestSendFailed) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index cc175052ac..e0ae5a88ff 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -32,7 +32,7 @@ from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel -from synapse.http.client import BlacklistingReactorWrapper +from 
synapse.http.client import BlocklistingReactorWrapper from synapse.http.connectproxyclient import ProxyCredentials from synapse.http.proxyagent import ProxyAgent, parse_proxy @@ -684,11 +684,11 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(body, b"result") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) - def test_http_request_via_proxy_with_blacklist(self) -> None: - # The blacklist includes the configured proxy IP. + def test_http_request_via_proxy_with_blocklist(self) -> None: + # The blocklist includes the configured proxy IP. agent = ProxyAgent( - BlacklistingReactorWrapper( - self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"]) + BlocklistingReactorWrapper( + self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"]) ), self.reactor, use_proxy=True, @@ -730,11 +730,11 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(body, b"result") @patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"}) - def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: - # The blacklist includes the configured proxy IP. + def test_https_request_via_uppercase_proxy_with_blocklist(self) -> None: + # The blocklist includes the configured proxy IP. agent = ProxyAgent( - BlacklistingReactorWrapper( - self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"]) + BlocklistingReactorWrapper( + self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"]) ), self.reactor, contextFactory=get_test_https_policy(), diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py index 010601da4b..be731645bf 100644 --- a/tests/http/test_simple_client.py +++ b/tests/http/test_simple_client.py @@ -123,17 +123,17 @@ class SimpleHttpClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestTimedOutError) - def test_client_ip_range_blacklist(self) -> None: - """Ensure that Synapse does not try to connect to blacklisted IPs""" + def test_client_ip_range_blocklist(self) -> None: + """Ensure that Synapse does not try to connect to blocked IPs""" - # Add some DNS entries we'll blacklist + # Add some DNS entries we'll block self.reactor.lookups["internal"] = "127.0.0.1" self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" - ip_blacklist = IPSet(["127.0.0.0/8", "fe80::/64"]) + ip_blocklist = IPSet(["127.0.0.0/8", "fe80::/64"]) - cl = SimpleHttpClient(self.hs, ip_blacklist=ip_blacklist) + cl = SimpleHttpClient(self.hs, ip_blocklist=ip_blocklist) - # Try making a GET request to a blacklisted IPv4 address + # Try making a GET request to a blocked IPv4 address # ------------------------------------------------------ # Make the request d = defer.ensureDeferred(cl.get_json("http://internal:8008/foo/bar")) @@ -145,7 +145,7 @@ class SimpleHttpClientTests(HomeserverTestCase): self.failureResultOf(d, DNSLookupError) - # Try making a POST request to a blacklisted IPv6 address + # Try making a POST request to a blocked IPv6 address # ------------------------------------------------------- # Make the request d = defer.ensureDeferred( @@ -159,10 +159,10 @@ class SimpleHttpClientTests(HomeserverTestCase): clients = self.reactor.tcpClients self.assertEqual(len(clients), 0) - # Check that it was due to a blacklisted DNS lookup + # Check that it was due to a blocked DNS lookup self.failureResultOf(d, DNSLookupError) - # Try making a GET request to a non-blacklisted IPv4 address + # Try making a GET request to a non-blocked IPv4 address # ---------------------------------------------------------- # Make the request d = 
defer.ensureDeferred(cl.get_json("http://testserv:8008/foo/bar")) diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 870047d0f2..f0f2da65db 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -31,7 +31,6 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.events import EventBase -from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable from synapse.media._base import FileInfo @@ -39,6 +38,7 @@ from synapse.media.filepath import MediaFilePaths from synapse.media.media_storage import MediaStorage, ReadableFileWrapper from synapse.media.storage_provider import FileStorageProviderBackend from synapse.module_api import ModuleApi +from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py new file mode 100644 index 0000000000..3c4c7d6765 --- /dev/null +++ b/tests/media/test_url_previewer.py @@ -0,0 +1,113 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import override_config
+
+try:
+    import lxml
+except ImportError:
+    lxml = None
+
+
+class URLPreviewTests(unittest.HomeserverTestCase):
+    if not lxml:
+        skip = "url preview feature requires lxml"
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        config = self.default_config()
+        config["url_preview_enabled"] = True
+        config["max_spider_size"] = 9999999
+        config["url_preview_ip_range_blacklist"] = (
+            "192.168.1.1",
+            "1.0.0.0/8",
+            "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+            "2001:800::/21",
+        )
+
+        self.storage_path = self.mktemp()
+        self.media_store_path = self.mktemp()
+        os.mkdir(self.storage_path)
+        os.mkdir(self.media_store_path)
+        config["media_store_path"] = self.media_store_path
+
+        provider_config = {
+            "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+            "store_local": True,
+            "store_synchronous": False,
+            "store_remote": True,
+            "config": {"directory": self.storage_path},
+        }
+
+        config["media_storage_providers"] = [provider_config]
+
+        return self.setup_test_homeserver(config=config)
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        media_repo_resource = hs.get_media_repository_resource()
+        preview_url = media_repo_resource.children[b"preview_url"]
+        self.url_previewer = preview_url._url_previewer
+
+    def test_all_urls_allowed(self) -> None:
+        self.assertFalse(self.url_previewer._is_url_blocked("http://matrix.org"))
+        self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org"))
+        self.assertFalse(self.url_previewer._is_url_blocked("http://localhost:8000"))
+        self.assertFalse(
+            self.url_previewer._is_url_blocked("http://user:pass@matrix.org")
+        )
+
+    @override_config(
+        {
+            "url_preview_url_blacklist": [
+                {"username": "user"},
+                {"scheme": "http", "netloc": "matrix.org"},
+            ]
+        }
+    )
+    def test_blocked_url(self) -> None:
+        # Blocked via scheme and URL.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://matrix.org"))
+        # Not blocked because all components must match.
+        self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org"))
+
+        # Blocked due to the user.
+        self.assertTrue(
+            self.url_previewer._is_url_blocked("http://user:pass@example.com")
+        )
+        self.assertTrue(self.url_previewer._is_url_blocked("http://user@example.com"))
+
+    @override_config({"url_preview_url_blacklist": [{"netloc": "*.example.com"}]})
+    def test_glob_blocked_url(self) -> None:
+        # All subdomains are blocked.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+        self.assertTrue(self.url_previewer._is_url_blocked("http://.example.com"))
+
+        # The TLD is not blocked.
+        self.assertFalse(self.url_previewer._is_url_blocked("https://example.com"))
+
+    @override_config({"url_preview_url_blacklist": [{"netloc": "^.+\\.example\\.com"}]})
+    def test_regex_blocked_url(self) -> None:
+        # All subdomains are blocked.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+        # Requires a non-empty subdomain.
+        self.assertFalse(self.url_previewer._is_url_blocked("http://.example.com"))
+
+        # The TLD is not blocked.
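+        # "example.com" has no leading subdomain, so the ^.+\. prefix of the
+        # pattern cannot match it.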
+ self.assertFalse(self.url_previewer._is_url_blocked("https://example.com")) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 3a1929691e..bff7114cd8 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict +from typing import Any, Dict, Optional from unittest.mock import Mock from twisted.internet import defer @@ -21,6 +21,7 @@ from synapse.api.constants import EduTypes, EventTypes from synapse.api.errors import NotFoundError from synapse.events import EventBase from synapse.federation.units import Transaction +from synapse.handlers.device import DeviceHandler from synapse.handlers.presence import UserPresenceState from synapse.handlers.push_rules import InvalidRuleException from synapse.module_api import ModuleApi @@ -773,6 +774,54 @@ class ModuleApiTestCase(BaseModuleApiTestCase): # Check room alias. self.assertIsNone(room_alias) + def test_on_logged_out(self) -> None: + """Test that on_logged_out module hook is properly called when logging out + a device, and that related pushers are still available at this time. + """ + device_id = "AAAAAAA" + user_id = self.register_user("test_on_logged_out", "secret") + self.login("test_on_logged_out", "secret", device_id) + + self.get_success( + self.hs.get_pusherpool().add_or_update_pusher( + user_id=user_id, + device_id=device_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "http://example.com/_matrix/push/v1/notify"}, + ) + ) + + # Setup a callback counting the number of pushers. + number_of_pushers_in_callback: Optional[int] = None + + async def _on_logged_out_mock( + user_id: str, device_id: Optional[str], access_token: str + ) -> None: + nonlocal number_of_pushers_in_callback + number_of_pushers_in_callback = len( + self.hs.get_pusherpool().pushers[user_id].values() + ) + + self.module_api.register_password_auth_provider_callbacks( + on_logged_out=_on_logged_out_mock + ) + + # Delete the device. + device_handler = self.hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) + self.get_success(device_handler.delete_devices(user_id, [device_id])) + + # Check that the callback was called and the pushers still existed. + self.assertEqual(number_of_pushers_in_callback, 1) + + # Ensure the pushers were deleted after the callback. 
+ self.assertEqual(len(self.hs.get_pusherpool().pushers[user_id].values()), 0) + class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" @@ -788,6 +837,7 @@ class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCa conf = super().default_config() conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, "presence_writer": {"host": "testserv", "port": 1001}, } return conf diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 99cec0836b..e68a979ee0 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -52,7 +52,7 @@ class HTTPPusherTests(HomeserverTestCase): m.post_json_get_json = post_json_get_json - hs = self.setup_test_homeserver(proxied_blacklisted_http_client=m) + hs = self.setup_test_homeserver(proxied_blocklisted_http_client=m) return hs @@ -962,3 +962,40 @@ class HTTPPusherTests(HomeserverTestCase): channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"], lookup_result.device_id, ) + + @override_config({"push": {"jitter_delay": "10s"}}) + def test_jitter(self) -> None: + """Tests that enabling jitter actually delays sending push.""" + user_id, access_token = self._make_user_with_pusher("user") + other_user_id, other_access_token = self._make_user_with_pusher("otheruser") + + room = self.helper.create_room_as(user_id, tok=access_token) + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # Send a message and check that it did not generate a push, as it should + # be delayed. + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 0) + + # Now advance time past the max jitter, and assert the message was sent. + self.reactor.advance(15) + self.assertEqual(len(self.push_attempts), 1) + + self.push_attempts[0][0].callback({}) + + # Now we send a bunch of messages and assert that they were all sent + # within the 10s max delay. 
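+        # Each push is delayed by a random jitter of up to the configured 10s,
+        # so advancing the clock one second at a time (below) should flush
+        # every attempt out within eleven iterations.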
+ for _ in range(10): + self.helper.send(room, body="Hi!", tok=other_access_token) + + index = 1 + for _ in range(11): + while len(self.push_attempts) > index: + self.push_attempts[index][0].callback({}) + self.pump() + index += 1 + + self.reactor.advance(1) + self.pump() + + self.assertEqual(len(self.push_attempts), 11) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 0f1a8a145f..eb9b1f1cd9 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -110,8 +110,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" + config["instance_map"] = {"main": {"host": "testserv", "port": 8765}} return config def _build_replication_data_handler(self) -> "TestReplicationDataHandler": @@ -249,6 +248,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): """ base = super().default_config() base["redis"] = {"enabled": True} + base["instance_map"] = {"main": {"host": "testserv", "port": 8765}} return base def setUp(self) -> None: @@ -310,7 +310,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): def make_worker_hs( self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any ) -> HomeServer: - """Make a new worker HS instance, correctly connecting replcation + """Make a new worker HS instance, correctly connecting replication stream to the master HS. Args: @@ -388,8 +388,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" return config def replicate(self) -> None: diff --git a/tests/replication/slave/storage/__init__.py b/tests/replication/slave/storage/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/tests/replication/slave/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
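A theme running through these replication-test changes is the retirement of worker_replication_host / worker_replication_http_port in favour of an instance_map entry pointing at the main process. A minimal sketch of a worker test config after this change, assembled from the hunks above (host and port values are the test fixtures', not recommendations):

def _get_worker_hs_config(self) -> dict:
    config = self.default_config()
    config["worker_app"] = "synapse.app.generic_worker"
    # Workers now locate the main process via the instance map rather than
    # via the dedicated worker_replication_* options.
    config["instance_map"] = {"main": {"host": "testserv", "port": 8765}}
    return config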
diff --git a/tests/replication/slave/__init__.py b/tests/replication/storage/__init__.py index f43a360a80..f43a360a80 100644 --- a/tests/replication/slave/__init__.py +++ b/tests/replication/storage/__init__.py diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/storage/_base.py index 4c9b494344..de26a62ae1 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/storage/_base.py @@ -24,7 +24,7 @@ from synapse.util import Clock from tests.replication._base import BaseStreamTestCase -class BaseSlavedStoreTestCase(BaseStreamTestCase): +class BaseWorkerStoreTestCase(BaseStreamTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=Mock()) @@ -34,7 +34,7 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase): self.reconnect() self.master_store = hs.get_datastores().main - self.slaved_store = self.worker_hs.get_datastores().main + self.worker_store = self.worker_hs.get_datastores().main persistence = hs.get_storage_controllers().persistence assert persistence is not None self.persistance = persistence @@ -50,7 +50,7 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase): self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None ) -> None: master_result = self.get_success(getattr(self.master_store, method)(*args)) - slaved_result = self.get_success(getattr(self.slaved_store, method)(*args)) + worker_result = self.get_success(getattr(self.worker_store, method)(*args)) if expected_result is not None: self.assertEqual( master_result, @@ -59,14 +59,14 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase): % (expected_result, master_result), ) self.assertEqual( - slaved_result, + worker_result, expected_result, - "Expected slave result to be %r but was %r" - % (expected_result, slaved_result), + "Expected worker result to be %r but was %r" + % (expected_result, worker_result), ) self.assertEqual( master_result, - slaved_result, - "Slave result %r does not match master result %r" - % (slaved_result, master_result), + worker_result, + "Worker result %r does not match master result %r" + % (worker_result, master_result), ) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/storage/test_events.py index b2125b1fea..f7c6417a09 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -36,7 +36,7 @@ from synapse.util import Clock from tests.server import FakeTransport -from ._base import BaseSlavedStoreTestCase +from ._base import BaseWorkerStoreTestCase USER_ID = "@feeling:test" USER_ID_2 = "@bright:test" @@ -63,7 +63,7 @@ def patch__eq__(cls: object) -> Callable[[], None]: return unpatch -class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): +class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): STORE_TYPE = EventsWorkerStore def setUp(self) -> None: @@ -294,7 +294,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): assert j2.internal_metadata.stream_ordering is not None event_source = RoomEventSource(self.hs) - event_source.store = self.slaved_store + event_source.store = self.worker_store current_token = event_source.get_current_key() # gradually stream out the replication @@ -310,12 +310,12 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): # # First, we get a list of the rooms we are joined to joined_rooms = self.get_success( - self.slaved_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) + 
self.worker_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) ) # Then, we get a list of the events since the last sync membership_changes = self.get_success( - self.slaved_store.get_membership_changes_for_user( + self.worker_store.get_membership_changes_for_user( USER_ID_2, prev_token, current_token ) ) diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py index 98602371e4..f7bca0063d 100644 --- a/tests/replication/test_auth.py +++ b/tests/replication/test_auth.py @@ -43,9 +43,6 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" - return config def _test_register(self) -> FakeChannel: diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index eca5033761..a18859099f 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -29,8 +29,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase): def _get_worker_hs_config(self) -> dict: config = self.default_config() config["worker_app"] = "synapse.app.generic_worker" - config["worker_replication_host"] = "testserv" - config["worker_replication_http_port"] = "8765" return config def test_register_single_worker(self) -> None: diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index dcb3e6669b..875811669c 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -93,7 +93,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "pusher1", "pusher_instances": ["pusher1"]}, - proxied_blacklisted_http_client=http_client_mock, + proxied_blocklisted_http_client=http_client_mock, ) event_id = self._create_pusher_and_send_msg("user") @@ -126,7 +126,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): "worker_name": "pusher1", "pusher_instances": ["pusher1", "pusher2"], }, - proxied_blacklisted_http_client=http_client_mock1, + proxied_blocklisted_http_client=http_client_mock1, ) http_client_mock2 = Mock(spec_set=["post_json_get_json"]) @@ -140,7 +140,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): "worker_name": "pusher2", "pusher_instances": ["pusher1", "pusher2"], }, - proxied_blacklisted_http_client=http_client_mock2, + proxied_blocklisted_http_client=http_client_mock2, ) # We choose a user name that we know should go to pusher1. 
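The pusher-shard hunks above only rename the injected HTTP client keyword from `proxied_blacklisted_http_client` to `proxied_blocklisted_http_client`; the test logic is unchanged. A minimal sketch of the stubbing pattern these tests rely on, assuming `unittest.mock` and Twisted's deferred API:

    from unittest import mock

    from twisted.internet import defer

    # Stub the blocklist-aware outbound HTTP client so the test can count
    # push attempts instead of making real network requests.
    http_client_mock = mock.Mock(spec_set=["post_json_get_json"])
    http_client_mock.post_json_get_json.side_effect = lambda *a, **kw: defer.succeed({})

    # The mock is then handed to the worker under the new keyword, e.g.
    # make_worker_hs(..., proxied_blocklisted_http_client=http_client_mock).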
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 7f9cc67e73..4623d737fb 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -50,6 +50,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
         conf = super().default_config()
         conf["stream_writers"] = {"events": ["worker1", "worker2"]}
         conf["instance_map"] = {
+            "main": {"host": "testserv", "port": 8765},
             "worker1": {"host": "testserv", "port": 1001},
             "worker2": {"host": "testserv", "port": 1002},
         }
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index a8f6436836..695e84357a 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -372,3 +372,126 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, channel.code, msg=channel.json_body)
         self.assertEqual("complete", channel.json_body["status"])
+
+
+class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.other_user_tok = self.login("user", "pass")
+
+        self.url = "/_synapse/admin/v1/experimental_features"
+
+    def test_enable_and_disable(self) -> None:
+        """
+        Test basic functionality of the ExperimentalFeatures endpoint.
+        """
+        # test enabling features works
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "PUT",
+            url,
+            content={
+                "features": {"msc3026": True, "msc3881": True},
+            },
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(channel.code, 200)
+
+        # list which features are enabled and ensure the ones we enabled are listed
+        self.assertEqual(channel.code, 200)
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "GET",
+            url,
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            True,
+            channel.json_body["features"]["msc3026"],
+        )
+        self.assertEqual(
+            True,
+            channel.json_body["features"]["msc3881"],
+        )
+
+        # test disabling a feature works
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "PUT",
+            url,
+            content={"features": {"msc3026": False}},
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(channel.code, 200)
+
+        # list the features enabled/disabled and ensure they are still correct
+        self.assertEqual(channel.code, 200)
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "GET",
+            url,
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
+            False,
+            channel.json_body["features"]["msc3026"],
+        )
+        self.assertEqual(
+            True,
+            channel.json_body["features"]["msc3881"],
+        )
+        self.assertEqual(
+            False,
+            channel.json_body["features"]["msc3967"],
+        )
+
+        # test nothing blows up if you try to disable a feature that isn't already enabled
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "PUT",
+            url,
+            content={"features": {"msc3026": False}},
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(channel.code, 200)
+
+        # test trying to enable a feature without an admin access token is denied
+        url = f"{self.url}/{self.other_user}"
+        channel = self.make_request(
+            "PUT",
+            url,
+ content={"features": {"msc3881": True}}, + access_token=self.other_user_tok, + ) + self.assertEqual(channel.code, 403) + self.assertEqual( + channel.json_body, + {"errcode": "M_FORBIDDEN", "error": "You are not a server admin"}, + ) + + # test trying to enable a bogus msc is denied + url = f"{self.url}/{self.other_user}" + channel = self.make_request( + "PUT", + url, + content={"features": {"msc6666": True}}, + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 400) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "'msc6666' is not recognised as a valid experimental feature.", + }, + ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b4241ceaf0..434bb56d44 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -802,9 +802,21 @@ class UsersListTestCase(unittest.HomeserverTestCase): # Set avatar URL to all users, that no user has a NULL value to avoid # different sort order between SQlite and PostreSQL - self.get_success(self.store.set_profile_avatar_url("user1", "mxc://url3")) - self.get_success(self.store.set_profile_avatar_url("user2", "mxc://url2")) - self.get_success(self.store.set_profile_avatar_url("admin", "mxc://url1")) + self.get_success( + self.store.set_profile_avatar_url( + UserID.from_string("@user1:test"), "mxc://url3" + ) + ) + self.get_success( + self.store.set_profile_avatar_url( + UserID.from_string("@user2:test"), "mxc://url2" + ) + ) + self.get_success( + self.store.set_profile_avatar_url( + UserID.from_string("@admin:test"), "mxc://url1" + ) + ) # order by default (name) self._order_test([self.admin_user, user1, user2], None) @@ -1127,7 +1139,9 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): # set attributes for user self.get_success( - self.store.set_profile_avatar_url("user", "mxc://servername/mediaid") + self.store.set_profile_avatar_url( + UserID.from_string("@user:test"), "mxc://servername/mediaid" + ) ) self.get_success( self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) @@ -1257,7 +1271,9 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): Reproduces #12257. """ # Patch `self.other_user` to have an empty string as their avatar. - self.get_success(self.store.set_profile_avatar_url("user", "")) + self.get_success( + self.store.set_profile_avatar_url(UserID.from_string("@user:test"), "") + ) # Check we can still erase them. 
channel = self.make_request( @@ -2311,7 +2327,9 @@ class UserRestTestCase(unittest.HomeserverTestCase): # set attributes for user self.get_success( - self.store.set_profile_avatar_url("user", "mxc://servername/mediaid") + self.store.set_profile_avatar_url( + UserID.from_string("@user:test"), "mxc://servername/mediaid" + ) ) self.get_success( self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 7f675c44a2..ac19f3c6da 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -474,6 +474,163 @@ class DeactivateTestCase(unittest.HomeserverTestCase): self.assertEqual(len(memberships), 1, memberships) self.assertEqual(memberships[0].room_id, room_id, memberships) + def test_deactivate_account_deletes_server_side_backup_keys(self) -> None: + key_handler = self.hs.get_e2e_room_keys_handler() + room_keys = { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": False, + "session_data": "SSBBTSBBIEZJU0gK", + } + } + } + } + } + + user_id = self.register_user("missPiggy", "test") + tok = self.login("missPiggy", "test") + + # add some backup keys/versions + version = self.get_success( + key_handler.create_version( + user_id, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }, + ) + ) + + self.get_success(key_handler.upload_room_keys(user_id, version, room_keys)) + + version2 = self.get_success( + key_handler.create_version( + user_id, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }, + ) + ) + + self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys)) + + self.deactivate(user_id, tok) + store = self.hs.get_datastores().main + + # Check that the user has been marked as deactivated. 
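+        # (Deactivation is also expected to have removed the user's e2e room
+        # key backups; the direct database selects below verify that both
+        # backup tables are now empty for this user.)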
+        self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id)))
+
+        # Check that there are no entries in `e2e_room_keys` and `e2e_room_keys_versions`
+        res = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys", {"user_id": user_id}, "*", "simple_select"
+            )
+        )
+        self.assertEqual(len(res), 0)
+
+        res2 = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys_versions", {"user_id": user_id}, "*", "simple_select"
+            )
+        )
+        self.assertEqual(len(res2), 0)
+
+    def test_background_update_deletes_deactivated_users_server_side_backup_keys(
+        self,
+    ) -> None:
+        key_handler = self.hs.get_e2e_room_keys_handler()
+        room_keys = {
+            "rooms": {
+                "!abc:matrix.org": {
+                    "sessions": {
+                        "c0ff33": {
+                            "first_message_index": 1,
+                            "forwarded_count": 1,
+                            "is_verified": False,
+                            "session_data": "SSBBTSBBIEZJU0gK",
+                        }
+                    }
+                }
+            }
+        }
+        self.store = self.hs.get_datastores().main
+
+        # create a bunch of users and add keys for them
+        users = []
+        for i in range(0, 20):
+            user_id = self.register_user("missPiggy" + str(i), "test")
+            users.append((user_id,))
+
+            # add some backup keys/versions
+            version = self.get_success(
+                key_handler.create_version(
+                    user_id,
+                    {
+                        "algorithm": "m.megolm_backup.v1",
+                        "auth_data": str(i) + "_version_auth_data",
+                    },
+                )
+            )
+
+            self.get_success(key_handler.upload_room_keys(user_id, version, room_keys))
+
+            version2 = self.get_success(
+                key_handler.create_version(
+                    user_id,
+                    {
+                        "algorithm": "m.megolm_backup.v1",
+                        "auth_data": str(i) + "_version_auth_data",
+                    },
+                )
+            )
+
+            self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys))
+
+        # deactivate most of the users by editing DB
+        self.get_success(
+            self.store.db_pool.simple_update_many(
+                table="users",
+                key_names=("name",),
+                key_values=users[0:18],
+                value_names=("deactivated",),
+                value_values=[(1,) for i in range(1, 19)],
+                desc="",
+            )
+        )
+
+        # run background update
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": "delete_e2e_backup_keys_for_deactivated_users",
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # check that keys are deleted for the deactivated users but not the others
+        res = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys", None, ("user_id",), "simple_select"
+            )
+        )
+        self.assertEqual(len(res), 4)
+
+        res2 = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys_versions", None, ("user_id",), "simple_select"
+            )
+        )
+        self.assertEqual(len(res2), 4)
+
     def deactivate(self, user_id: str, tok: str) -> None:
         request_data = {
             "auth": {
diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py
index 91678abf13..9faa9de050 100644
--- a/tests/rest/client/test_filter.py
+++ b/tests/rest/client/test_filter.py
@@ -17,6 +17,7 @@ from twisted.test.proto_helpers import MemoryReactor
 from synapse.api.errors import Codes
 from synapse.rest.client import filter
 from synapse.server import HomeServer
+from synapse.types import UserID
 from synapse.util import Clock

 from tests import unittest
@@ -76,7 +77,8 @@ class FilterTestCase(unittest.HomeserverTestCase):
     def test_get_filter(self) -> None:
         filter_id = self.get_success(
             self.filtering.add_user_filter(
-                user_localpart="apple", user_filter=self.EXAMPLE_FILTER
+                user_id=UserID.from_string("@apple:test"),
+                user_filter=self.EXAMPLE_FILTER,
             )
         )
         self.reactor.advance(1)
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 6f4135eea0..41c1a16e6e 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -42,7 +42,7 @@ from tests.test_utils.html_parsers import TestHtmlParser
 from tests.unittest import HomeserverTestCase, override_config, skip_unless

 try:
-    from authlib.jose import jwk, jwt
+    from authlib.jose import JsonWebKey, jwt

     HAS_JWT = True
 except ImportError:
@@ -1081,6 +1081,22 @@ class JWTTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
         self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")

+    def test_deactivated_user(self) -> None:
+        """Logging in as a deactivated account should error."""
+        user_id = self.register_user("kermit", "monkey")
+        self.get_success(
+            self.hs.get_deactivate_account_handler().deactivate_account(
+                user_id, erase_data=False, requester=create_requester(user_id)
+            )
+        )
+
+        channel = self.jwt_login({"sub": "kermit"})
+        self.assertEqual(channel.code, 403, msg=channel.result)
+        self.assertEqual(channel.json_body["errcode"], "M_USER_DEACTIVATED")
+        self.assertEqual(
+            channel.json_body["error"], "This account has been deactivated"
+        )
+

 # The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
 # RS256, with a public key configured in synapse as "jwt_secret", and tokens
@@ -1148,7 +1164,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase):
     def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
         header = {"alg": "RS256"}
         if secret.startswith("-----BEGIN RSA PRIVATE KEY-----"):
-            secret = jwk.dumps(secret, kty="RSA")
+            secret = JsonWebKey.import_key(secret, {"kty": "RSA"})
         result: bytes = jwt.encode(header, payload, secret)
         return result.decode("ascii")
diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py
index a4327f7ace..22fddbd6d6 100644
--- a/tests/rest/client/test_mutual_rooms.py
+++ b/tests/rest/client/test_mutual_rooms.py
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from urllib.parse import quote
+
 from twisted.test.proto_helpers import MemoryReactor

 import synapse.rest.admin
@@ -44,8 +46,8 @@ class UserMutualRoomsTest(unittest.HomeserverTestCase):
     def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel:
         return self.make_request(
             "GET",
-            "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms/%s"
-            % other_user,
+            "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms"
+            f"?user_id={quote(other_user)}",
             access_token=token,
         )
diff --git a/tests/rest/client/test_read_marker.py b/tests/rest/client/test_read_marker.py
new file mode 100644
index 0000000000..0eedcdb476
--- /dev/null
+++ b/tests/rest/client/test_read_marker.py
@@ -0,0 +1,147 @@
+# Copyright 2023 Beeper
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes +from synapse.rest import admin +from synapse.rest.client import login, read_marker, register, room +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest + +ONE_HOUR_MS = 3600000 +ONE_DAY_MS = ONE_HOUR_MS * 24 + + +class ReadMarkerTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + register.register_servlets, + read_marker.register_servlets, + room.register_servlets, + synapse.rest.admin.register_servlets, + admin.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + config = self.default_config() + + # merge this default retention config with anything that was specified in + # @override_config + retention_config = { + "enabled": True, + "allowed_lifetime_min": ONE_DAY_MS, + "allowed_lifetime_max": ONE_DAY_MS * 3, + } + retention_config.update(config.get("retention", {})) + config["retention"] = retention_config + + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + self.store = self.hs.get_datastores().main + self.clock = self.hs.get_clock() + + def test_send_read_marker(self) -> None: + room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + def send_message() -> str: + res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok) + return res["event_id"] + + # Test setting the read marker on the room + event_id_1 = send_message() + + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_1, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Test moving the read marker to a newer event + event_id_2 = send_message() + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_2, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + def test_send_read_marker_missing_previous_event(self) -> None: + """ + Test moving a read marker from an event that previously existed but was + later removed due to retention rules. 
+ """ + + room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + # Set retention rule on the room so we remove old events to test this case + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={"max_lifetime": ONE_DAY_MS}, + tok=self.owner_tok, + ) + + def send_message() -> str: + res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok) + return res["event_id"] + + # Test setting the read marker on the room + event_id_1 = send_message() + + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_1, + }, + access_token=self.owner_tok, + ) + + # Send a second message (retention will not remove the latest event ever) + send_message() + # And then advance so retention rules remove the first event (where the marker is) + self.reactor.advance(ONE_DAY_MS * 2 / 1000) + + event = self.get_success(self.store.get_event(event_id_1, allow_none=True)) + assert event is None + + # TODO See https://github.com/matrix-org/synapse/issues/13476 + self.store.get_event_ordering.invalidate_all() + + # Test moving the read marker to a newer event + event_id_2 = send_message() + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_2, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 5dfe44defb..84a60c0b07 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -16,6 +16,7 @@ from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.room_versions import RoomVersions from synapse.rest import admin from synapse.rest.client import login, room, sync from synapse.server import HomeServer @@ -74,6 +75,7 @@ class RedactionsTestCase(HomeserverTestCase): event_id: str, expect_code: int = 200, with_relations: Optional[List[str]] = None, + content: Optional[JsonDict] = None, ) -> JsonDict: """Helper function to send a redaction event. @@ -81,7 +83,7 @@ class RedactionsTestCase(HomeserverTestCase): """ path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id) - request_content = {} + request_content = content or {} if with_relations: request_content["org.matrix.msc3912.with_relations"] = with_relations @@ -92,7 +94,7 @@ class RedactionsTestCase(HomeserverTestCase): return channel.json_body def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]: - channel = self.make_request("GET", "sync", access_token=self.mod_access_token) + channel = self.make_request("GET", "sync", access_token=access_token) self.assertEqual(channel.code, 200) room_sync = channel.json_body["rooms"]["join"][room_id] return room_sync["timeline"]["events"] @@ -466,3 +468,36 @@ class RedactionsTestCase(HomeserverTestCase): ) self.assertIn("body", event_dict["content"], event_dict) self.assertEqual("I'm in a thread!", event_dict["content"]["body"]) + + def test_content_redaction(self) -> None: + """MSC2174 moved the redacts property to the content.""" + # Create a room with the newer room version. + room_id = self.helper.create_room_as( + self.mod_user_id, + tok=self.mod_access_token, + room_version=RoomVersions.MSC2176.identifier, + ) + + # Create an event. 
+        b = self.helper.send(room_id=room_id, tok=self.mod_access_token)
+        event_id = b["event_id"]
+
+        # Attempt to redact it with a bogus event ID.
+        self._redact_event(
+            self.mod_access_token,
+            room_id,
+            event_id,
+            expect_code=400,
+            content={"redacts": "foo"},
+        )
+
+        # Redact it for real.
+        self._redact_event(self.mod_access_token, room_id, event_id)
+
+        # Sync the room to fetch the redaction event from the timeline.
+        timeline = self._sync_room_timeline(self.mod_access_token, room_id)
+        redact_event = timeline[-1]
+        self.assertEqual(redact_event["type"], EventTypes.Redaction)
+        # The redacts key should be in the content.
+        self.assertNotIn("redacts", redact_event)
+        self.assertEqual(redact_event["content"]["redacts"], event_id)
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index fbbbcb23f1..75439416c1 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -30,6 +30,7 @@ from tests import unittest
 from tests.server import FakeChannel
 from tests.test_utils import make_awaitable
 from tests.test_utils.event_injection import inject_event
+from tests.unittest import override_config


 class BaseRelationsTestCase(unittest.HomeserverTestCase):
@@ -949,6 +950,125 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
         )


+class RecursiveRelationTestCase(BaseRelationsTestCase):
+    @override_config({"experimental_features": {"msc3981_recurse_relations": True}})
+    def test_recursive_relations(self) -> None:
+        """Generate a complex, multi-level relationship tree and query it."""
+        # Create a thread with a few messages in it.
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_1 = channel.json_body["event_id"]
+
+        channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+        thread_2 = channel.json_body["event_id"]
+
+        # Add annotations.
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2
+        )
+        annotation_1 = channel.json_body["event_id"]
+
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=thread_1
+        )
+        annotation_2 = channel.json_body["event_id"]
+
+        # Add a reference to part of the thread, then edit the reference and annotate it.
+        channel = self._send_relation(
+            RelationTypes.REFERENCE, "m.room.test", parent_id=thread_2
+        )
+        reference_1 = channel.json_body["event_id"]
+
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "c", parent_id=reference_1
+        )
+        annotation_3 = channel.json_body["event_id"]
+
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.test",
+            parent_id=reference_1,
+        )
+        edit = channel.json_body["event_id"]
+
+        # Also more events off the root.
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "d")
+        annotation_4 = channel.json_body["event_id"]
+
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}"
+            "?dir=f&limit=20&org.matrix.msc3981.recurse=true",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # The above events should be returned in creation order.
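+        # (With org.matrix.msc3981.recurse=true the server walks relations of
+        # relations, so annotations on the thread messages and on the
+        # reference are included, not just direct children of the parent.)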
+ event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] + self.assertEqual( + event_ids, + [ + thread_1, + thread_2, + annotation_1, + annotation_2, + reference_1, + annotation_3, + edit, + annotation_4, + ], + ) + + @override_config({"experimental_features": {"msc3981_recurse_relations": True}}) + def test_recursive_relations_with_filter(self) -> None: + """The event_type and rel_type still apply.""" + # Create a thread with a few messages in it. + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_1 = channel.json_body["event_id"] + + # Add annotations. + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=thread_1 + ) + annotation_1 = channel.json_body["event_id"] + + # Add a reference to part of the thread, then edit the reference and annotate it. + channel = self._send_relation( + RelationTypes.REFERENCE, "m.room.test", parent_id=thread_1 + ) + reference_1 = channel.json_body["event_id"] + + channel = self._send_relation( + RelationTypes.ANNOTATION, "org.matrix.reaction", "c", parent_id=reference_1 + ) + annotation_2 = channel.json_body["event_id"] + + # Fetch only annotations, but recursively. + channel = self.make_request( + "GET", + f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}/{RelationTypes.ANNOTATION}" + "?dir=f&limit=20&org.matrix.msc3981.recurse=true", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # The above events should be returned in creation order. + event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] + self.assertEqual(event_ids, [annotation_1, annotation_2]) + + # Fetch only m.reactions, but recursively. + channel = self.make_request( + "GET", + f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}/{RelationTypes.ANNOTATION}/m.reaction" + "?dir=f&limit=20&org.matrix.msc3981.recurse=true", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # The above events should be returned in creation order. + event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] + self.assertEqual(event_ids, [annotation_1]) + + class BundledAggregationsTestCase(BaseRelationsTestCase): """ See RelationsTestCase.test_edit for a similar test for edits. diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index a4900703c4..4d39c89f6f 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -814,7 +814,9 @@ class RoomsCreateTestCase(RoomBase): return False join_mock = Mock(side_effect=user_may_join_room) - self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append( + join_mock + ) channel = self.make_request( "POST", @@ -840,7 +842,9 @@ class RoomsCreateTestCase(RoomBase): return Codes.CONSENT_NOT_GIVEN join_mock = Mock(side_effect=user_may_join_room_codes) - self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append( + join_mock + ) channel = self.make_request( "POST", @@ -1162,7 +1166,9 @@ class RoomJoinTestCase(RoomBase): # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. 
callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) - self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append( + callback_mock + ) # Join a first room, without being invited to it. self.helper.join(self.room1, self.user2, tok=self.tok2) @@ -1227,7 +1233,9 @@ class RoomJoinTestCase(RoomBase): # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) - self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append( + callback_mock + ) # Join a first room, without being invited to it. self.helper.join(self.room1, self.user2, tok=self.tok2) @@ -1643,7 +1651,7 @@ class RoomMessagesTestCase(RoomBase): spam_checker = SpamCheck() - self.hs.get_spam_checker()._check_event_for_spam_callbacks.append( + self.hs.get_module_api_callbacks().spam_checker._check_event_for_spam_callbacks.append( spam_checker.check_event_for_spam ) @@ -3381,7 +3389,9 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. mock = Mock(return_value=make_awaitable(True), spec=lambda *x: None) - self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( + mock + ) # Send a 3PID invite into the room and check that it succeeded. email_to_invite = "teresa@example.com" @@ -3446,7 +3456,9 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): return_value=make_awaitable(synapse.module_api.NOT_SPAM), spec=lambda *x: None, ) - self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( + mock + ) # Send a 3PID invite into the room and check that it succeeded. 
email_to_invite = "teresa@example.com" diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 753ecc8d16..e5ba5a9706 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -22,7 +22,9 @@ from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion from synapse.config.homeserver import HomeServerConfig from synapse.events import EventBase -from synapse.events.third_party_rules import load_legacy_third_party_event_rules +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.rest import admin from synapse.rest.client import account, login, profile, room from synapse.server import HomeServer @@ -146,7 +148,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [ + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ callback ] @@ -202,7 +204,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ) -> Tuple[bool, Optional[JsonDict]]: raise NastyHackException(429, "message") - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # Make a request channel = self.make_request( @@ -229,7 +233,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ev.content = {"x": "y"} return True, None - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # now send the event channel = self.make_request( @@ -253,7 +259,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): d["content"] = {"x": "y"} return True, d - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # now send the event channel = self.make_request( @@ -289,7 +297,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): } return True, d - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + check + ] # Send an event, then edit it. 
channel = self.make_request( @@ -440,7 +450,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): ) return True, None - self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [test_fn] + self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [ + test_fn + ] # Sometimes the bug might not happen the first time the event type is added # to the state but might happen when an event updates the state of the room for @@ -466,7 +478,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): def test_on_new_event(self) -> None: """Test that the on_new_event callback is called on new events""" on_new_event = Mock(make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_new_event_callbacks.append( + self.hs.get_module_api_callbacks().third_party_event_rules._on_new_event_callbacks.append( on_new_event ) @@ -569,7 +581,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( + m + ) # Change the display name. channel = self.make_request( @@ -628,7 +642,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( + m + ) # Register an admin user. self.register_user("admin", "password", admin=True) @@ -667,7 +683,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. deactivation_mock = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append( deactivation_mock, ) @@ -675,7 +691,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # deactivation code calls it in a way that let modules know the user is being # deactivated. profile_mock = Mock(return_value=make_awaitable(None)) - self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append( + self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( profile_mock, ) @@ -725,7 +741,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mock callback. m = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append(m) # Register an admin user. @@ -779,7 +795,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. deactivation_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, ) @@ -825,7 +841,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. 
deactivation_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, ) @@ -864,7 +880,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. shutdown_mock = Mock(return_value=make_awaitable(False)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_shutdown_room_callbacks.append( shutdown_mock, ) @@ -900,7 +916,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): """ # Register a mocked callback. threepid_bind_mock = Mock(return_value=make_awaitable(None)) - third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock) # Register an admin user. @@ -947,8 +963,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules = self.hs.get_third_party_event_rules() - third_party_rules.register_third_party_rules_callbacks( + self.hs.get_module_api().register_third_party_rules_callbacks( on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) @@ -1009,8 +1024,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules = self.hs.get_third_party_event_rules() - third_party_rules.register_third_party_rules_callbacks( + self.hs.get_module_api().register_third_party_rules_callbacks( on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py new file mode 100644 index 0000000000..9beeeab843 --- /dev/null +++ b/tests/rest/media/test_domain_blocking.py @@ -0,0 +1,139 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
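+
+# (These tests exercise the `prevent_media_downloads_from` option: media from
+# listed server names should 404, while media from other servers must still be
+# served, for both downloads and thumbnails.)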
+from typing import Dict + +from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource + +from synapse.media._base import FileInfo +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest +from tests.test_utils import SMALL_PNG +from tests.unittest import override_config + + +class MediaDomainBlockingTests(unittest.HomeserverTestCase): + remote_media_id = "doesnotmatter" + remote_server_name = "evil.com" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + # Inject a piece of media. We'll use this to ensure we're returning a sane + # response when we're not supposed to block it, distinguishing a media block + # from a regular 404. + file_id = "abcdefg12345" + file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id) + with hs.get_media_repository().media_storage.store_into_file(file_info) as ( + f, + fname, + finish, + ): + f.write(SMALL_PNG) + self.get_success(finish()) + + self.get_success( + self.store.store_cached_remote_media( + origin=self.remote_server_name, + media_id=self.remote_media_id, + media_type="image/png", + media_length=1, + time_now_ms=clock.time_msec(), + upload_name="test.png", + filesystem_id=file_id, + ) + ) + + def create_resource_dict(self) -> Dict[str, Resource]: + # We need to manually set the resource tree to include media, the + # default only does `/_matrix/client` APIs. + return {"/_matrix/media": self.hs.get_media_repository_resource()} + + @override_config( + { + # Disable downloads from the domain we'll be trying to download from. + # Should result in a 404. + "prevent_media_downloads_from": ["evil.com"] + } + ) + def test_cannot_download_blocked_media(self) -> None: + """ + Tests to ensure that remote media which is blocked cannot be downloaded. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}", + shorthand=False, + ) + self.assertEqual(response.code, 404) + + @override_config( + { + # Disable downloads from a domain we won't be requesting downloads from. + # This proves we haven't broken anything. + "prevent_media_downloads_from": ["not-listed.com"] + } + ) + def test_remote_media_normally_unblocked(self) -> None: + """ + Tests to ensure that remote media is normally able to be downloaded + when no domain block is in place. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}", + shorthand=False, + ) + self.assertEqual(response.code, 200) + + @override_config( + { + # Disable downloads from the domain we'll be trying to download from. + # Should result in a 404. + "prevent_media_downloads_from": ["evil.com"], + "dynamic_thumbnails": True, + } + ) + def test_cannot_download_blocked_media_thumbnail(self) -> None: + """ + Same test as test_cannot_download_blocked_media but for thumbnails. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + shorthand=False, + content={"width": 100, "height": 100}, + ) + self.assertEqual(response.code, 404) + + @override_config( + { + # Disable downloads from a domain we won't be requesting downloads from. + # This proves we haven't broken anything. 
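+            # ("not-listed.com" never matches the "evil.com" media requested
+            # below, so this download should succeed.)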
+ "prevent_media_downloads_from": ["not-listed.com"], + "dynamic_thumbnails": True, + } + ) + def test_remote_media_thumbnail_normally_unblocked(self) -> None: + """ + Same test as test_remote_media_normally_unblocked but for thumbnails. + """ + response = self.make_request( + "GET", + f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100", + shorthand=False, + ) + self.assertEqual(response.code, 200) diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index e44beae8c1..170fb0534a 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -418,9 +418,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_specific(self) -> None: + def test_blocked_ip_specific(self) -> None: """ - Blacklisted IP addresses, found via DNS, are not spidered. + Blocked IP addresses, found via DNS, are not spidered. """ self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] @@ -439,9 +439,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_range(self) -> None: + def test_blocked_ip_range(self) -> None: """ - Blacklisted IP ranges, IPs found over DNS, are not spidered. + Blocked IP ranges, IPs found over DNS, are not spidered. """ self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")] @@ -458,9 +458,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_specific_direct(self) -> None: + def test_blocked_ip_specific_direct(self) -> None: """ - Blacklisted IP addresses, accessed directly, are not spidered. + Blocked IP addresses, accessed directly, are not spidered. """ channel = self.make_request( "GET", "preview_url?url=http://192.168.1.1", shorthand=False @@ -470,16 +470,13 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(len(self.reactor.tcpClients), 0) self.assertEqual( channel.json_body, - { - "errcode": "M_UNKNOWN", - "error": "IP address blocked by IP blacklist entry", - }, + {"errcode": "M_UNKNOWN", "error": "IP address blocked"}, ) self.assertEqual(channel.code, 403) - def test_blacklisted_ip_range_direct(self) -> None: + def test_blocked_ip_range_direct(self) -> None: """ - Blacklisted IP ranges, accessed directly, are not spidered. + Blocked IP ranges, accessed directly, are not spidered. """ channel = self.make_request( "GET", "preview_url?url=http://1.1.1.2", shorthand=False @@ -488,15 +485,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 403) self.assertEqual( channel.json_body, - { - "errcode": "M_UNKNOWN", - "error": "IP address blocked by IP blacklist entry", - }, + {"errcode": "M_UNKNOWN", "error": "IP address blocked"}, ) - def test_blacklisted_ip_range_whitelisted_ip(self) -> None: + def test_blocked_ip_range_whitelisted_ip(self) -> None: """ - Blacklisted but then subsequently whitelisted IP addresses can be + Blocked but then subsequently whitelisted IP addresses can be spidered. """ self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")] @@ -527,10 +521,10 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_with_external_ip(self) -> None: + def test_blocked_ip_with_external_ip(self) -> None: """ - If a hostname resolves a blacklisted IP, even if there's a - non-blacklisted one, it will be rejected. 
+ If a hostname resolves a blocked IP, even if there's a non-blocked one, + it will be rejected. """ # Hardcode the URL resolving to the IP we want. self.lookups["example.com"] = [ @@ -550,9 +544,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_specific(self) -> None: + def test_blocked_ipv6_specific(self) -> None: """ - Blacklisted IP addresses, found via DNS, are not spidered. + Blocked IP addresses, found via DNS, are not spidered. """ self.lookups["example.com"] = [ (IPv6Address, "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") @@ -573,9 +567,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_range(self) -> None: + def test_blocked_ipv6_range(self) -> None: """ - Blacklisted IP ranges, IPs found over DNS, are not spidered. + Blocked IP ranges, IPs found over DNS, are not spidered. """ self.lookups["example.com"] = [(IPv6Address, "2001:800::1")] @@ -653,6 +647,57 @@ class URLPreviewTests(unittest.HomeserverTestCase): server.data, ) + def test_image(self) -> None: + """An image should be precached if mentioned in the HTML.""" + self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] + self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")] + + result = ( + b"""<html><body><img src="http://cdn.matrix.org/foo.png"></body></html>""" + ) + + channel = self.make_request( + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, + ) + self.pump() + + # Respond with the HTML. + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: text/html; charset="utf8"\r\n\r\n' + ) + % (len(result),) + + result + ) + self.pump() + + # Respond with the photo. + client = self.reactor.tcpClients[1][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b"Content-Type: image/png\r\n\r\n" + ) + % (len(SMALL_PNG),) + + SMALL_PNG + ) + self.pump() + + # The image should be in the result. + self.assertEqual(channel.code, 200) + self._assert_small_png(channel.json_body) + def test_nonexistent_image(self) -> None: """If the preview image doesn't exist, ensure some data is returned.""" self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] @@ -683,9 +728,53 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) self.pump() + + # There should not be a second connection. + self.assertEqual(len(self.reactor.tcpClients), 1) + + # The image should not be in the result. 
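+        # (The preview request itself still succeeds with a 200; only the
+        # og:image field is omitted when fetching the image fails.)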
         self.assertEqual(channel.code, 200)
+        self.assertNotIn("og:image", channel.json_body)
+
+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "cdn.matrix.org"}]}
+    )
+    def test_image_blocked(self) -> None:
+        """If the preview image is blocked, ensure the rest of the preview data is still returned."""
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+        self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]
+
+        result = (
+            b"""<html><body><img src="http://cdn.matrix.org/foo.jpg"></body></html>"""
+        )
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://matrix.org",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(result),)
+            + result
+        )
+        self.pump()
+
+        # There should not be a second connection.
+        self.assertEqual(len(self.reactor.tcpClients), 1)

         # The image should not be in the result.
+        self.assertEqual(channel.code, 200)
         self.assertNotIn("og:image", channel.json_body)

     def test_oembed_failure(self) -> None:
@@ -880,6 +969,11 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self.pump()
+
+        # Double check that the proper host is being connected to. (Note that
+        # twitter.com can't be resolved so this is already implicitly checked.)
+        self.assertIn(b"\r\nHost: publish.twitter.com\r\n", server.data)
+
         self.assertEqual(channel.code, 200)
         body = channel.json_body
         self.assertEqual(
@@ -940,6 +1034,22 @@
         },
     )

+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+    )
+    def test_oembed_blocked(self) -> None:
+        """The oEmbed URL should not be downloaded if it is blocked."""
+        self.lookups["twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+        self.assertEqual(channel.code, 403, channel.result)
+
     def test_oembed_autodiscovery(self) -> None:
         """
         Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
@@ -980,7 +1090,6 @@
             % (len(result),)
             + result
         )
-
         self.pump()

         # The oEmbed response.
@@ -1004,7 +1113,6 @@
             % (len(oembed_content),)
             + oembed_content
         )
-
         self.pump()

         # Ensure the URL is what was requested.
@@ -1023,7 +1131,6 @@
             % (len(SMALL_PNG),)
             + SMALL_PNG
         )
-
         self.pump()

         # Ensure the URL is what was requested.
@@ -1036,6 +1143,59 @@
         )
         self._assert_small_png(body)

+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+    )
+    def test_oembed_autodiscovery_blocked(self) -> None:
+        """
+        If the discovered oEmbed URL is blocked, it should be discarded.
+        """
+        # This is a little cheesy in that we use the www subdomain (which isn't in the
+        # list of oEmbed patterns) to get a "raw" HTML response.
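+        # (The autodiscovered oEmbed endpoint lives on the blocked
+        # publish.twitter.com host, so only the initial HTML fetch should
+        # occur; the assertions below check that no second connection is made.)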
+        self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+        self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.4")]
+
+        result = b"""
+        <title>Test</title>
+        <link rel="alternate" type="application/json+oembed"
+            href="http://publish.twitter.com/oembed?url=http%3A%2F%2Fcdn.twitter.com%2Fmatrixdotorg%2Fstatus%2F12345&format=json"
+            title="matrixdotorg" />
+        """
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(result),)
+            + result
+        )
+
+        self.pump()
+
+        # Ensure there are no additional connections.
+        self.assertEqual(len(self.reactor.tcpClients), 1)
+
+        # Ensure the URL is what was requested.
+        self.assertIn(b"\r\nHost: www.twitter.com\r\n", server.data)
+
+        self.assertEqual(channel.code, 200)
+        body = channel.json_body
+        self.assertEqual(body["og:title"], "Test")
+        self.assertNotIn("og:image", body)
+
     def _download_image(self) -> Tuple[str, str]:
         """Downloads an image into the URL cache.
         Returns:
@@ -1192,8 +1352,8 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
 
     @unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]})
-    def test_blacklist_port(self) -> None:
-        """Tests that blacklisting URLs with a port makes previewing such URLs
+    def test_blocked_port(self) -> None:
+        """Tests that blocking URLs with a port makes previewing such URLs
         fail with a 403 error and doesn't impact other previews.
         """
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
@@ -1230,3 +1390,23 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         self.pump()
 
         self.assertEqual(channel.code, 200)
+
+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "example.com"}]}
+    )
+    def test_blocked_url(self) -> None:
+        """Tests that blocking URLs with a host makes previewing such URLs
+        fail with a 403 error.
+ """ + self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")] + + bad_url = quote("http://example.com/foo") + + channel = self.make_request( + "GET", + "preview_url?url=" + bad_url, + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual(channel.code, 403, channel.result) diff --git a/tests/server.py b/tests/server.py index b52ff1c463..7296f0a552 100644 --- a/tests/server.py +++ b/tests/server.py @@ -73,11 +73,13 @@ from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig from synapse.config.homeserver import HomeServerConfig from synapse.events.presence_router import load_legacy_presence_router -from synapse.events.spamcheck import load_legacy_spam_checkers -from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseRequest from synapse.logging.context import ContextResourceUsage +from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers +from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( + load_legacy_third_party_event_rules, +) from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.database import LoggingDatabaseConnection diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index ba68171ad7..5d7c13e6d0 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -37,24 +37,24 @@ KEY_2 = decode_verify_key_base64( class KeyStoreTestCase(tests.unittest.HomeserverTestCase): - def test_get_server_verify_keys(self) -> None: + def test_get_server_signature_keys(self) -> None: store = self.hs.get_datastores().main key_id_1 = "ed25519:key1" key_id_2 = "ed25519:KEY_ID_2" self.get_success( - store.store_server_verify_keys( + store.store_server_signature_keys( "from_server", 10, - [ - ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + { + ("server1", key_id_1): FetchKeyResult(KEY_1, 100), + ("server1", key_id_2): FetchKeyResult(KEY_2, 200), + }, ) ) res = self.get_success( - store.get_server_verify_keys( + store.get_server_signature_keys( [ ("server1", key_id_1), ("server1", key_id_2), @@ -87,18 +87,18 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): key_id_2 = "ed25519:key2" self.get_success( - store.store_server_verify_keys( + store.store_server_signature_keys( "from_server", 0, - [ - ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + { + ("srv1", key_id_1): FetchKeyResult(KEY_1, 100), + ("srv1", key_id_2): FetchKeyResult(KEY_2, 200), + }, ) ) res = self.get_success( - store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) ) self.assertEqual(len(res.keys()), 2) @@ -111,20 +111,20 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): self.assertEqual(res2.valid_until_ts, 200) # we should be able to look up the same thing again without a db hit - res = self.get_success(store.get_server_verify_keys([("srv1", key_id_1)])) + res = self.get_success(store.get_server_signature_keys([("srv1", key_id_1)])) self.assertEqual(len(res.keys()), 1) self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) new_key_2 = signedjson.key.get_verify_key( signedjson.key.generate_signing_key("key2") ) - d = store.store_server_verify_keys( - "from_server", 10, 
[("srv1", key_id_2, FetchKeyResult(new_key_2, 300))] + d = store.store_server_signature_keys( + "from_server", 10, {("srv1", key_id_2): FetchKeyResult(new_key_2, 300)} ) self.get_success(d) res = self.get_success( - store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) ) self.assertEqual(len(res.keys()), 2) diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index 5806cb0e4b..27f450e22d 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -29,9 +29,9 @@ class DataStoreTestCase(unittest.HomeserverTestCase): def test_get_users_paginate(self) -> None: self.get_success(self.store.register_user(self.user.to_string(), "pass")) - self.get_success(self.store.create_profile(self.user.localpart)) + self.get_success(self.store.create_profile(self.user)) self.get_success( - self.store.set_profile_displayname(self.user.localpart, self.displayname) + self.store.set_profile_displayname(self.user, self.displayname) ) users, total = self.get_success( diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index a019d06e09..f9cf0fcb82 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -14,6 +14,8 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine from synapse.types import UserID from synapse.util import Clock @@ -27,11 +29,9 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.u_frank = UserID.from_string("@frank:test") def test_displayname(self) -> None: - self.get_success(self.store.create_profile(self.u_frank.localpart)) + self.get_success(self.store.create_profile(self.u_frank)) - self.get_success( - self.store.set_profile_displayname(self.u_frank.localpart, "Frank") - ) + self.get_success(self.store.set_profile_displayname(self.u_frank, "Frank")) self.assertEqual( "Frank", @@ -43,21 +43,17 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): ) # test set to None - self.get_success( - self.store.set_profile_displayname(self.u_frank.localpart, None) - ) + self.get_success(self.store.set_profile_displayname(self.u_frank, None)) self.assertIsNone( self.get_success(self.store.get_profile_displayname(self.u_frank.localpart)) ) def test_avatar_url(self) -> None: - self.get_success(self.store.create_profile(self.u_frank.localpart)) + self.get_success(self.store.create_profile(self.u_frank)) self.get_success( - self.store.set_profile_avatar_url( - self.u_frank.localpart, "http://my.site/here" - ) + self.store.set_profile_avatar_url(self.u_frank, "http://my.site/here") ) self.assertEqual( @@ -70,10 +66,69 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): ) # test set to None - self.get_success( - self.store.set_profile_avatar_url(self.u_frank.localpart, None) - ) + self.get_success(self.store.set_profile_avatar_url(self.u_frank, None)) self.assertIsNone( self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart)) ) + + def test_profiles_bg_migration(self) -> None: + """ + Test background job that copies entries from column user_id to full_user_id, adding + the hostname in the process. 
+        """
+        updater = self.hs.get_datastores().main.db_pool.updates
+
+        # drop the constraint so we can insert nulls in full_user_id to populate the test
+        if isinstance(self.store.database_engine, PostgresEngine):
+
+            def f(txn: LoggingTransaction) -> None:
+                txn.execute(
+                    "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null"
+                )
+
+            self.get_success(self.store.db_pool.runInteraction("", f))
+
+        for i in range(0, 70):
+            self.get_success(
+                self.store.db_pool.simple_insert(
+                    "profiles",
+                    {"user_id": f"hello{i:02}"},
+                )
+            )
+
+        # re-add the constraint so that when it's validated it actually exists
+        if isinstance(self.store.database_engine, PostgresEngine):
+
+            def f(txn: LoggingTransaction) -> None:
+                txn.execute(
+                    "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
+                )
+
+            self.get_success(self.store.db_pool.runInteraction("", f))
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={
+                    "update_name": "populate_full_user_id_profiles",
+                    "progress_json": "{}",
+                },
+            )
+        )
+
+        self.get_success(
+            updater.run_background_updates(False),
+        )
+
+        expected_values = []
+        for i in range(0, 70):
+            expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
+
+        res = self.get_success(
+            self.store.db_pool.execute(
+                "", None, "SELECT full_user_id from profiles ORDER BY full_user_id"
+            )
+        )
+        self.assertEqual(len(res), len(expected_values))
+        self.assertEqual(res, expected_values)
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index db9ee9955e..2fab84a529 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -33,15 +33,14 @@ class TransactionStoreTestCase(HomeserverTestCase):
         destination retries, as well as testing that we can set and get correctly.
         """
-        d = self.store.get_destination_retry_timings("example.com")
-        r = self.get_success(d)
+        r = self.get_success(self.store.get_destination_retry_timings("example.com"))
         self.assertIsNone(r)
 
-        d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
-        self.get_success(d)
+        self.get_success(
+            self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
+        )
 
-        d = self.store.get_destination_retry_timings("example.com")
-        r = self.get_success(d)
+        r = self.get_success(self.store.get_destination_retry_timings("example.com"))
 
         self.assertEqual(
             DestinationRetryTimings(
diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py
new file mode 100644
index 0000000000..bab802f56e
--- /dev/null
+++ b/tests/storage/test_user_filters.py
@@ -0,0 +1,94 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine +from synapse.util import Clock + +from tests import unittest + + +class UserFiltersStoreTestCase(unittest.HomeserverTestCase): + """ + Test background migration that copies entries from column user_id to full_user_id, adding + the hostname in the process. + """ + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_bg_migration(self) -> None: + updater = self.hs.get_datastores().main.db_pool.updates + + # drop the constraint so we can insert nulls in full_user_id to populate the test + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + for i in range(0, 70): + self.get_success( + self.store.db_pool.simple_insert( + "user_filters", + { + "user_id": f"hello{i:02}", + "filter_id": i, + "filter_json": bytearray(i), + }, + ) + ) + + # re-add the constraint so that when it's validated it actually exists + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={ + "update_name": "populate_full_user_id_user_filters", + "progress_json": "{}", + }, + ) + ) + + self.get_success( + updater.run_background_updates(False), + ) + + expected_values = [] + for i in range(0, 70): + expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) + + res = self.get_success( + self.store.db_pool.execute( + "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" + ) + ) + self.assertEqual(len(res), len(expected_values)) + self.assertEqual(res, expected_values) diff --git a/tests/test_federation.py b/tests/test_federation.py index 46d2f99eac..6d15ac7597 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -267,7 +267,9 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Resync the device list. device_handler = self.hs.get_device_handler() self.get_success( - device_handler.device_list_updater.user_device_resync(remote_user_id), + device_handler.device_list_updater.multi_user_device_resync( + [remote_user_id] + ), ) # Retrieve the cross-signing keys for this user. 
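The `populate_full_user_id_profiles` and `populate_full_user_id_user_filters` background updates exercised by the two migration tests above apply the same row-level transformation: the bare localpart stored in `user_id` is qualified with a leading `@` and the homeserver's hostname to produce `full_user_id`. A minimal sketch of that transformation, using a hypothetical helper name rather than Synapse's actual implementation:

    # Hypothetical helper illustrating the user_id -> full_user_id derivation
    # checked by the tests above; not a real Synapse function.
    def make_full_user_id(localpart: str, hostname: str) -> str:
        return f"@{localpart}:{hostname}"

    # A row with user_id "hello00" on a homeserver named "test" should end up
    # with full_user_id "@hello00:test", matching the tests' expected_values.
    assert make_full_user_id("hello00", "test") == "@hello00:test"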
diff --git a/tests/test_state.py b/tests/test_state.py index b20a26e1ff..ddf59916b1 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -228,6 +228,7 @@ class StateTestCase(unittest.TestCase): "get_macaroon_generator", "get_instance_name", "get_simple_http_client", + "get_replication_client", "hostname", ] ) @@ -263,7 +264,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: dict[str, EventContext] = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index b522163a34..c37f205ed0 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -40,10 +40,9 @@ def setup_logging() -> None: """ root_logger = logging.getLogger() - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - " - "%(levelname)s - %(request)s - %(message)s" - ) + # We exclude `%(asctime)s` from this format because the Twisted logger adds its own + # timestamp + log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s" handler = ToTwistedHandler() formatter = logging.Formatter(log_format) diff --git a/tests/unittest.py b/tests/unittest.py index 8a16fd3665..c73195b32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -13,9 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import functools import gc import hashlib import hmac +import json import logging import secrets import time @@ -53,6 +55,7 @@ from twisted.web.server import Request from synapse import events from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.config._base import Config, RootConfig from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.crypto.event_signing import add_hashes_and_signatures @@ -67,7 +70,6 @@ from synapse.logging.context import ( ) from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer -from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -124,6 +126,53 @@ def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]: return _around +_TConfig = TypeVar("_TConfig", Config, RootConfig) + + +def deepcopy_config(config: _TConfig) -> _TConfig: + new_config: _TConfig + + if isinstance(config, RootConfig): + new_config = config.__class__(config.config_files) # type: ignore[arg-type] + else: + new_config = config.__class__(config.root) + + for attr_name in config.__dict__: + if attr_name.startswith("__") or attr_name == "root": + continue + attr = getattr(config, attr_name) + if isinstance(attr, Config): + new_attr = deepcopy_config(attr) + else: + new_attr = attr + + setattr(new_config, attr_name, new_attr) + + return new_config + + +@functools.lru_cache(maxsize=8) +def _parse_config_dict(config: str) -> RootConfig: + config_obj = HomeServerConfig() + config_obj.parse_config_dict(json.loads(config), "", "") + return config_obj + + +def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: + """Creates a :class:`HomeServerConfig` instance with the given configuration dict. 
+ + This is equivalent to:: + + config_obj = HomeServerConfig() + config_obj.parse_config_dict(config, "", "") + + but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed, + to avoid validating the whole configuration every time. + """ + config_obj = _parse_config_dict(json.dumps(config, sort_keys=True)) + return deepcopy_config(config_obj) + + class TestCase(unittest.TestCase): """A subclass of twisted.trial's TestCase which looks for 'loglevel' attributes on both itself and its individual test methods, to override the @@ -171,13 +220,20 @@ class TestCase(unittest.TestCase): # # The easiest way to do this would be to do a full GC after each test # run, but that is very expensive. Instead, we disable GC (above) for - # the duration of the test so that we only need to run a gen-0 GC, which - # is a lot quicker. + # the duration of the test and only run a gen-0 GC, which is a lot + # quicker. This doesn't clean up everything, since the TestCase + # instance still holds references to objects created during the test, + # such as HomeServers, so we do a full GC every so often. @around(self) def tearDown(orig: Callable[[], R]) -> R: ret = orig() gc.collect(0) + # Run a full GC every 50 gen-0 GCs. + gen0_stats = gc.get_stats()[0] + gen0_collections = gen0_stats["collections"] + if gen0_collections % 50 == 0: + gc.collect() gc.enable() set_current_context(SENTINEL_CONTEXT) @@ -508,7 +564,9 @@ class HomeserverTestCase(TestCase): client_ip, ) - def setup_test_homeserver(self, *args: Any, **kwargs: Any) -> HomeServer: + def setup_test_homeserver( + self, name: Optional[str] = None, **kwargs: Any + ) -> HomeServer: """ Set up the test homeserver, meant to be called by the overridable make_homeserver. It automatically passes through the test class's @@ -527,16 +585,25 @@ class HomeserverTestCase(TestCase): else: config = kwargs["config"] + # The server name can be specified using either the `name` argument or a config + # override. The `name` argument takes precedence over any config overrides. + if name is not None: + config["server_name"] = name + # Parse the config from a config dict into a HomeServerConfig - config_obj = HomeServerConfig() - config_obj.parse_config_dict(config, "", "") + config_obj = make_homeserver_config_obj(config) kwargs["config"] = config_obj + # The server name in the config is now `name`, if provided, or the `server_name` + # from a config override, or the default of "test". Whichever it is, we + # construct a homeserver with a matching name. + kwargs["name"] = config_obj.server.server_name + async def run_bg_updates() -> None: with LoggingContext("run_bg_updates"): self.get_success(stor.db_pool.updates.run_background_updates(False)) - hs = setup_test_homeserver(self.addCleanup, *args, **kwargs) + hs = setup_test_homeserver(self.addCleanup, **kwargs) stor = hs.get_datastores().main # Run the database background updates, when running against "master". 
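The `make_homeserver_config_obj` helper added above combines two standard tricks: serialising the config dict to JSON with `sort_keys=True` gives `functools.lru_cache` a hashable, canonical cache key, and deep-copying the cached object on the way out keeps one test's mutations from leaking into another's. A self-contained sketch of the same pattern, using `copy.deepcopy` and a stand-in `ExpensiveConfig` class in place of Synapse's `HomeServerConfig` and its bespoke `deepcopy_config`:

    import copy
    import functools
    import json
    from typing import Any, Dict

    class ExpensiveConfig:
        """Stand-in for a config object that is costly to parse and validate."""

        def __init__(self, data: Dict[str, Any]) -> None:
            self.data = data  # imagine heavy validation happening here

    @functools.lru_cache(maxsize=8)
    def _parse_cached(config_json: str) -> ExpensiveConfig:
        # Parse/validate once per distinct config; lru_cache needs a hashable
        # argument, hence the JSON string rather than the dict itself.
        return ExpensiveConfig(json.loads(config_json))

    def make_config(config: Dict[str, Any]) -> ExpensiveConfig:
        # sort_keys=True canonicalises the dict so equal dicts share a cache slot.
        cached = _parse_cached(json.dumps(config, sort_keys=True))
        # Hand out a copy so callers cannot mutate the cached instance.
        return copy.deepcopy(cached)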
@@ -790,19 +857,23 @@ class FederatingHomeserverTestCase(HomeserverTestCase):
         verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)
 
         self.get_success(
-            hs.get_datastores().main.store_server_verify_keys(
+            hs.get_datastores().main.store_server_keys_json(
+                self.OTHER_SERVER_NAME,
+                verify_key_id,
                 from_server=self.OTHER_SERVER_NAME,
-                ts_added_ms=clock.time_msec(),
-                verify_keys=[
-                    (
-                        self.OTHER_SERVER_NAME,
-                        verify_key_id,
-                        FetchKeyResult(
-                            verify_key=verify_key,
-                            valid_until_ts=clock.time_msec() + 10000,
-                        ),
-                    )
-                ],
+                ts_now_ms=clock.time_msec(),
+                ts_expires_ms=clock.time_msec() + 10000,
+                key_json_bytes=canonicaljson.encode_canonical_json(
+                    {
+                        "verify_keys": {
+                            verify_key_id: {
+                                "key": signedjson.key.encode_verify_key_base64(
+                                    verify_key
+                                )
+                            }
+                        }
+                    }
+                ),
             )
         )
diff --git a/tests/utils.py b/tests/utils.py
index a0ac11bc5c..e73b46944b 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -131,6 +131,9 @@ def default_config(
         # the test signing key is just an arbitrary ed25519 key to keep the config
         # parser happy
         "signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg",
+        # Disable trusted key servers, otherwise unit tests might try to actually
+        # reach out to matrix.org.
+        "trusted_key_servers": [],
         "event_cache_size": 1,
         "enable_registration": True,
         "enable_registration_captcha": False,
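As context for the `store_server_keys_json` change above: instead of storing parsed `FetchKeyResult` objects, the test fixture now stores the canonical-JSON encoding of a server-keys dictionary. A standalone sketch of building such a blob with the `signedjson` and `canonicaljson` libraries (the `"v1"` key version is an arbitrary example value):

    import canonicaljson
    import signedjson.key

    # Generate an ed25519 signing key and derive its public (verify) key.
    signing_key = signedjson.key.generate_signing_key("v1")
    verify_key = signedjson.key.get_verify_key(signing_key)
    key_id = "%s:%s" % (verify_key.alg, verify_key.version)  # e.g. "ed25519:v1"

    # Canonical JSON yields a stable byte encoding suitable for storage.
    key_json_bytes = canonicaljson.encode_canonical_json(
        {
            "verify_keys": {
                key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)}
            }
        }
    )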