diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 41503899a1..07d4f6dfce 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -9,6 +9,5 @@
- End with either a period (.) or an exclamation mark (!).
- Start with a capital letter.
- Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
-* [ ] Pull request includes a [sign off](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#sign-off)
* [ ] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct
(run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
index 8b20322308..4c0a7989a9 100644
--- a/.github/workflows/docs-pr-netlify.yaml
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
- uses: dawidd6/action-download-artifact@268677152d06ba59fcec7a7f0b5d961b6ccd7e1e # v2.28.0
+ uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d # v3.0.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index 52b0f8802d..652ef90095 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -39,7 +39,7 @@ jobs:
cp book/welcome_and_overview.html book/index.html
- name: Upload Artifact
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: book
path: book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index f40342f951..a754515c9d 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -164,7 +164,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index baf4b62292..b9d0bc1743 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -92,7 +92,7 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: debs
path: debs/*
@@ -156,7 +156,7 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: Wheel
path: ./wheelhouse/*.whl
@@ -177,7 +177,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: Sdist
path: dist/*.tar.gz
@@ -194,7 +194,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index f22ca5f7e6..da15d87ab0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -12,10 +12,6 @@ concurrency:
cancel-in-progress: true
jobs:
- check-signoff:
- if: "github.event_name == 'pull_request'"
- uses: "matrix-org/backend-meta/.github/workflows/sign-off.yml@v2"
-
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
@@ -286,10 +282,26 @@ jobs:
- check-schema-delta
- check-lockfile
- lint-clippy
+ - lint-clippy-nightly
- lint-rustfmt
runs-on: ubuntu-latest
steps:
- - run: "true"
+ - uses: matrix-org/done-action@v2
+ with:
+ needs: ${{ toJSON(needs) }}
+
+ # Various bits are skipped if there were no applicable changes.
+ skippable: |
+ check-sampleconfig
+ check-schema-delta
+ lint
+ lint-mypy
+ lint-newsfile
+ lint-pydantic
+ lint-clippy
+ lint-clippy-nightly
+ lint-rustfmt
+
calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
@@ -496,7 +508,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -594,7 +606,7 @@ jobs:
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
@@ -699,6 +711,7 @@ jobs:
- complement
- cargo-test
- cargo-bench
+ - linting-done
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
@@ -706,7 +719,7 @@ jobs:
needs: ${{ toJSON(needs) }}
# Various bits are skipped if there was no applicable changes.
- # The newsfile and signoff lint may be skipped on non PR builds.
+ # The newsfile lint may be skipped on non-PR builds.
skippable: |
trial
trial-olddeps
@@ -714,7 +727,6 @@ jobs:
portdb
export-data
complement
- check-signoff
lint-newsfile
cargo-test
cargo-bench
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 428180fffd..b7b93b3561 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -136,7 +136,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
diff --git a/CHANGES.md b/CHANGES.md
index cac80ca972..d6f3d7cf33 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,50 @@
+# Synapse 1.99.0rc1 (2024-01-09)
+
+### Features
+
+- Add [config options](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#server_notices) to set the avatar and the topic of the server notices room, as well as the avatar of the server notices user. ([\#16679](https://github.com/matrix-org/synapse/issues/16679))
+- Add config option [`email.notif_delay_before_mail`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#email) to tweak the delay before an email is sent following a notification. ([\#16696](https://github.com/matrix-org/synapse/issues/16696))
+- Add new configuration option [`sentry.environment`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#sentry) for improved system monitoring. Contributed by @zeeshanrafiqrana. ([\#16738](https://github.com/matrix-org/synapse/issues/16738))
+- Filter out rooms from the room directory being served to other homeservers when those rooms block that homeserver by their Access Control Lists. ([\#16759](https://github.com/element-hq/synapse/issues/16759))
+
+### Bugfixes
+
+- Fix a long-standing bug where the signing keys generated by Synapse were world-readable. Contributed by Fabian Klemp. ([\#16740](https://github.com/matrix-org/synapse/issues/16740))
+- Fix email verification redirection. Contributed by Fadhlan Ridhwanallah. ([\#16761](https://github.com/element-hq/synapse/issues/16761))
+- Fixed a bug that prevented users from being queried by display name if it contains non-ASCII characters. ([\#16767](https://github.com/element-hq/synapse/issues/16767))
+- Allow reactivating a user without a password using the Admin API in some edge cases. ([\#16770](https://github.com/element-hq/synapse/issues/16770))
+- Add the `recursion_depth` parameter to the response of the /relations endpoint if MSC3981 recursion is being performed. ([\#16775](https://github.com/element-hq/synapse/issues/16775))
+
+### Improved Documentation
+
+- Added version picker for Synapse documentation. Contributed by @Dmytro27Ind. ([\#16533](https://github.com/matrix-org/synapse/issues/16533))
+- Clarify that `password_config.enabled: "only_for_reauth"` does not allow new logins to be created using password auth. ([\#16737](https://github.com/matrix-org/synapse/issues/16737))
+- Remove value from header in configuration documentation for `refresh_token_lifetime`. ([\#16763](https://github.com/element-hq/synapse/issues/16763))
+- Add another custom statistics collection server to the documentation. Contributed by @loelkes. ([\#16769](https://github.com/element-hq/synapse/issues/16769))
+
+### Internal Changes
+
+- Remove run-once workflow after adding the version picker to the documentation. ([\#9453](https://github.com/element-hq/synapse/issues/9453))
+- Update the implementation of [MSC2965](https://github.com/matrix-org/matrix-spec-proposals/pull/2965) (OIDC Provider discovery). ([\#16726](https://github.com/matrix-org/synapse/issues/16726))
+- Move the rust stubs inline for better IDE integration. ([\#16757](https://github.com/element-hq/synapse/issues/16757))
+- Fix sample config doc CI. ([\#16758](https://github.com/element-hq/synapse/issues/16758))
+- Simplify event internal metadata class. ([\#16762](https://github.com/element-hq/synapse/issues/16762), [\#16780](https://github.com/element-hq/synapse/issues/16780))
+- Sign the published docker image using [cosign](https://docs.sigstore.dev/). ([\#16774](https://github.com/element-hq/synapse/issues/16774))
+- Port `EventInternalMetadata` class to Rust. ([\#16782](https://github.com/element-hq/synapse/issues/16782))
+
+
+
+### Updates to locked dependencies
+
+* Bump actions/setup-go from 4 to 5. ([\#16749](https://github.com/matrix-org/synapse/issues/16749))
+* Bump actions/setup-python from 4 to 5. ([\#16748](https://github.com/matrix-org/synapse/issues/16748))
+* Bump immutabledict from 3.0.0 to 4.0.0. ([\#16743](https://github.com/matrix-org/synapse/issues/16743))
+* Bump isort from 5.12.0 to 5.13.0. ([\#16745](https://github.com/matrix-org/synapse/issues/16745))
+* Bump isort from 5.13.0 to 5.13.1. ([\#16752](https://github.com/matrix-org/synapse/issues/16752))
+* Bump pydantic from 2.5.1 to 2.5.2. ([\#16747](https://github.com/matrix-org/synapse/issues/16747))
+* Bump ruff from 0.1.6 to 0.1.7. ([\#16746](https://github.com/matrix-org/synapse/issues/16746))
+* Bump types-setuptools from 68.2.0.2 to 69.0.0.0. ([\#16744](https://github.com/matrix-org/synapse/issues/16744))
+
# Synapse 1.98.0 (2023-12-12)
Synapse 1.98.0 will be the last Synapse release in 2023; the regular release cadence will resume in January 2024.
diff --git a/Cargo.lock b/Cargo.lock
index d5e77297f4..f895fbc650 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.75"
+version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
+checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
[[package]]
name = "arc-swap"
@@ -188,18 +188,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.64"
+version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
+checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
-version = "0.20.0"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04e8453b658fe480c3e70c8ed4e3d3ec33eb74988bd186561b0cc66b85c3bc4b"
+checksum = "9a89dc7a5850d0e983be1ec2a463a171d20990487c3cfcd68b5363f1ee3d6fe0"
dependencies = [
"anyhow",
"cfg-if",
@@ -215,9 +215,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.20.0"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a96fe70b176a89cff78f2fa7b3c930081e163d5379b4dcdf993e3ae29ca662e5"
+checksum = "07426f0d8fe5a601f26293f300afd1a7b1ed5e78b2a705870c5f30893c5163be"
dependencies = [
"once_cell",
"target-lexicon",
@@ -225,9 +225,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.20.0"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "214929900fd25e6604661ed9cf349727c8920d47deff196c4e28165a6ef2a96b"
+checksum = "dbb7dec17e17766b46bca4f1a4215a85006b4c2ecde122076c562dd058da6cf1"
dependencies = [
"libc",
"pyo3-build-config",
@@ -246,9 +246,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.20.0"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dac53072f717aa1bfa4db832b39de8c875b7c7af4f4a6fe93cdbf9264cf8383b"
+checksum = "05f738b4e40d50b5711957f142878cfa0f28e054aa0ebdfc3fd137a843f74ed3"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -258,9 +258,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.20.0"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7774b5a8282bd4f25f803b1f0d945120be959a36c72e08e7cd031c792fdfd424"
+checksum = "0fc910d4851847827daf9d6cdd4a823fbdaab5b8818325c5e97a86da79e8881f"
dependencies = [
"heck",
"proc-macro2",
@@ -280,9 +280,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.29"
+version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
+checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
@@ -339,18 +339,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.193"
+version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
+checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.193"
+version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
+checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
"proc-macro2",
"quote",
@@ -359,9 +359,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.108"
+version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
+checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
"itoa",
"ryu",
@@ -382,9 +382,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "2.0.28"
+version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
+checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
"proc-macro2",
"quote",
diff --git a/changelog.d/16533.doc b/changelog.d/16533.doc
deleted file mode 100644
index ae23a8a578..0000000000
--- a/changelog.d/16533.doc
+++ /dev/null
@@ -1 +0,0 @@
-Added version picker for Synapse documentation. Contributed by @Dmytro27Ind.
\ No newline at end of file
diff --git a/changelog.d/16679.feature b/changelog.d/16679.feature
deleted file mode 100644
index 85af837ae1..0000000000
--- a/changelog.d/16679.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add config options to set the avatar and the topic of the server notices room.
diff --git a/changelog.d/16696.feature b/changelog.d/16696.feature
deleted file mode 100644
index 53d7b40f36..0000000000
--- a/changelog.d/16696.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a setting to be able to tweak the delay without interaction before an email is sent following a notification.
diff --git a/changelog.d/16726.misc b/changelog.d/16726.misc
deleted file mode 100644
index bac312465c..0000000000
--- a/changelog.d/16726.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update the implementation of [MSC2965](https://github.com/matrix-org/matrix-spec-proposals/pull/2965) (OIDC Provider discovery).
diff --git a/changelog.d/16737.doc b/changelog.d/16737.doc
deleted file mode 100644
index 26035b73ec..0000000000
--- a/changelog.d/16737.doc
+++ /dev/null
@@ -1 +0,0 @@
-Clarify that `password_config.enabled: "only_for_reauth"` does not allow new logins to be created using password auth.
\ No newline at end of file
diff --git a/changelog.d/16738.feature b/changelog.d/16738.feature
deleted file mode 100644
index c9ea12a2ab..0000000000
--- a/changelog.d/16738.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add new Sentry configuration option `environment` for improved system monitoring. Contributed by @zeeshanrafiqrana.
\ No newline at end of file
diff --git a/changelog.d/16740.bugfix b/changelog.d/16740.bugfix
deleted file mode 100644
index 21551516e2..0000000000
--- a/changelog.d/16740.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a long-standing bug where the signing keys generated by Synapse were world-readable. Contributed by Fabian Klemp.
diff --git a/changelog.d/16756.misc b/changelog.d/16756.misc
new file mode 100644
index 0000000000..200e18fb7b
--- /dev/null
+++ b/changelog.d/16756.misc
@@ -0,0 +1 @@
+Improve DB performance of calculating badge counts for push.
diff --git a/changelog.d/16757.misc b/changelog.d/16757.misc
deleted file mode 100644
index 9856bf1aa6..0000000000
--- a/changelog.d/16757.misc
+++ /dev/null
@@ -1 +0,0 @@
-Move the rust stubs inline for better IDE integration.
diff --git a/changelog.d/16758.misc b/changelog.d/16758.misc
deleted file mode 100644
index eea2d0869c..0000000000
--- a/changelog.d/16758.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix sample config doc CI.
diff --git a/changelog.d/16759.feature b/changelog.d/16759.feature
deleted file mode 100644
index 5846e5a9f0..0000000000
--- a/changelog.d/16759.feature
+++ /dev/null
@@ -1 +0,0 @@
-Filter out rooms from the room directory being served to other homeservers when those rooms block that homeserver by their Access Control Lists.
\ No newline at end of file
diff --git a/changelog.d/16761.bugfix b/changelog.d/16761.bugfix
deleted file mode 100644
index 86c6545eda..0000000000
--- a/changelog.d/16761.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix email verification redirection. Contributed by Fadhlan Ridhwanallah.
\ No newline at end of file
diff --git a/changelog.d/16762.misc b/changelog.d/16762.misc
deleted file mode 100644
index c49dc2085e..0000000000
--- a/changelog.d/16762.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify event internal metadata class.
diff --git a/changelog.d/16763.doc b/changelog.d/16763.doc
deleted file mode 100644
index e4236e8ef6..0000000000
--- a/changelog.d/16763.doc
+++ /dev/null
@@ -1 +0,0 @@
-Remove value from header in configuration documentation for `refresh_token_lifetime`.
diff --git a/changelog.d/16766.misc b/changelog.d/16766.misc
new file mode 100644
index 0000000000..ded77a11c4
--- /dev/null
+++ b/changelog.d/16766.misc
@@ -0,0 +1 @@
+Split up deleting devices into batches.
diff --git a/changelog.d/16767.bugfix b/changelog.d/16767.bugfix
deleted file mode 100644
index b1fa1285ef..0000000000
--- a/changelog.d/16767.bugfix
+++ /dev/null
@@ -1,2 +0,0 @@
-Fixed a bug that prevented users from being queried by display name if it contains non-ASCII characters.
-
diff --git a/changelog.d/16769.doc b/changelog.d/16769.doc
deleted file mode 100644
index c46e9e9908..0000000000
--- a/changelog.d/16769.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add another custom statistics collection server to the documentation. Contributed by @loelkes.
\ No newline at end of file
diff --git a/changelog.d/16770.bugfix b/changelog.d/16770.bugfix
deleted file mode 100644
index c02bd8510d..0000000000
--- a/changelog.d/16770.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Allow reactivate user without password with Admin API in some edge cases.
diff --git a/changelog.d/16774.misc b/changelog.d/16774.misc
deleted file mode 100644
index c5ad9bf68c..0000000000
--- a/changelog.d/16774.misc
+++ /dev/null
@@ -1 +0,0 @@
-Sign the published docker image using [cosign](https://docs.sigstore.dev/).
\ No newline at end of file
diff --git a/changelog.d/16775.bugfix b/changelog.d/16775.bugfix
deleted file mode 100644
index 99f04ca59d..0000000000
--- a/changelog.d/16775.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Adds the recursion_depth parameter to the response of the /relations endpoint if MSC3981 recursion is being performed.
diff --git a/changelog.d/16776.misc b/changelog.d/16776.misc
new file mode 100644
index 0000000000..1650075a77
--- /dev/null
+++ b/changelog.d/16776.misc
@@ -0,0 +1 @@
+Remove CI check for sign-off as we require a CLA signature instead.
diff --git a/changelog.d/16778.doc b/changelog.d/16778.doc
new file mode 100644
index 0000000000..fe3ca0da7e
--- /dev/null
+++ b/changelog.d/16778.doc
@@ -0,0 +1 @@
+Add a link to the "Request log format" explainer on the "Logging sample config" documentation page.
diff --git a/changelog.d/16780.misc b/changelog.d/16780.misc
deleted file mode 100644
index c49dc2085e..0000000000
--- a/changelog.d/16780.misc
+++ /dev/null
@@ -1 +0,0 @@
-Simplify event internal metadata class.
diff --git a/changelog.d/16781.misc b/changelog.d/16781.misc
new file mode 100644
index 0000000000..2f628dc5cb
--- /dev/null
+++ b/changelog.d/16781.misc
@@ -0,0 +1 @@
+Ensure CI fails when linting fails, to make sure auto-merge does the correct thing.
diff --git a/changelog.d/16782.misc b/changelog.d/16782.misc
deleted file mode 100644
index d0cb0be26f..0000000000
--- a/changelog.d/16782.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port `EventInternalMetadata` class to Rust.
diff --git a/changelog.d/16783.misc b/changelog.d/16783.misc
new file mode 100644
index 0000000000..9d3b96ffc6
--- /dev/null
+++ b/changelog.d/16783.misc
@@ -0,0 +1 @@
+Faster loading of recent events for sync by reducing the amount of state pulled out.
diff --git a/changelog.d/16785.misc b/changelog.d/16785.misc
new file mode 100644
index 0000000000..4de185c5dd
--- /dev/null
+++ b/changelog.d/16785.misc
@@ -0,0 +1 @@
+Reduce the amount of state pulled out when querying the federation hierarchy.
diff --git a/changelog.d/16788.misc b/changelog.d/16788.misc
new file mode 100644
index 0000000000..e58a5a7a32
--- /dev/null
+++ b/changelog.d/16788.misc
@@ -0,0 +1 @@
+Pull less state out of the DB when we retry fetching old events during backfill.
diff --git a/changelog.d/16805.misc b/changelog.d/16805.misc
new file mode 100644
index 0000000000..0b54ab0f74
--- /dev/null
+++ b/changelog.d/16805.misc
@@ -0,0 +1 @@
+Optimize query for fetching to-device messages in `/sync`.
diff --git a/changelog.d/16806.misc b/changelog.d/16806.misc
new file mode 100644
index 0000000000..623338268b
--- /dev/null
+++ b/changelog.d/16806.misc
@@ -0,0 +1 @@
+Reject OIDC config when `client_secret` isn't specified, but the auth method requires one.
diff --git a/changelog.d/7.misc b/changelog.d/7.misc
new file mode 100644
index 0000000000..63f1fb77ff
--- /dev/null
+++ b/changelog.d/7.misc
@@ -0,0 +1 @@
+Faster partial joins to rooms with complex auth graphs.
diff --git a/changelog.d/9453.misc b/changelog.d/9453.misc
deleted file mode 100644
index 73656104cf..0000000000
--- a/changelog.d/9453.misc
+++ /dev/null
@@ -1 +0,0 @@
-Remove run-once workflow after adding the version picker to the documentation.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index abcfedf6d6..bca3ab92c7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.99.0~rc1ubuntu1) UNRELEASED; urgency=medium
+
+ * Fix copyright file with new licensing
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 11 Jan 2024 13:47:29 +0000
+
+matrix-synapse-py3 (1.99.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.99.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 09 Jan 2024 13:43:56 +0000
+
matrix-synapse-py3 (1.98.0) stable; urgency=medium
* New Synapse release 1.98.0.
diff --git a/debian/copyright b/debian/copyright
index 03e176876a..9e407ce425 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -6,6 +6,10 @@ Files: *
Copyright: 2014-2017, OpenMarket Ltd, 2017-2018 New Vector Ltd
License: Apache-2.0
+Files: *
+Copyright: 2023 New Vector Ltd
+License: AGPL-3.0-or-later
+
Files: synapse/config/saml2.py
Copyright: 2015, Ericsson
License: Apache-2.0
diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md
index 8956741997..23a55abdcc 100644
--- a/docs/usage/configuration/logging_sample_config.md
+++ b/docs/usage/configuration/logging_sample_config.md
@@ -11,6 +11,9 @@ Note that a default logging configuration (shown below) is created automatically
the homeserver config when following the [installation instructions](../../setup/installation.md).
It should be named `<SERVERNAME>.log.config` by default.
+Hint: If you're looking for a guide on what each of the fields in the "Processed request" log lines means,
+see [Request log format](../administration/request_log.md).
+
```yaml
{{#include ../../sample_log_config.yaml}}
```
diff --git a/poetry.lock b/poetry.lock
index b3e046b52d..3a2a5af424 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -64,17 +64,17 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte
[[package]]
name = "authlib"
-version = "1.2.1"
+version = "1.3.0"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = true
-python-versions = "*"
+python-versions = ">=3.8"
files = [
- {file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"},
- {file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"},
+ {file = "Authlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:9637e4de1fb498310a56900b3e2043a206b03cb11c05422014b0302cbc814be3"},
+ {file = "Authlib-1.3.0.tar.gz", hash = "sha256:959ea62a5b7b5123c5059758296122b57cd2585ae2ed1c0622c21b371ffdae06"},
]
[package.dependencies]
-cryptography = ">=3.2"
+cryptography = "*"
[[package]]
name = "automat"
@@ -1616,70 +1616,88 @@ files = [
[[package]]
name = "pillow"
-version = "10.1.0"
+version = "10.2.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"},
- {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"},
- {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"},
- {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"},
- {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"},
- {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"},
- {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"},
- {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"},
- {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"},
- {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"},
- {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"},
- {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"},
- {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"},
- {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"},
- {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"},
- {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"},
- {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"},
- {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"},
- {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"},
- {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"},
- {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"},
- {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"},
- {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"},
- {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"},
- {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"},
- {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"},
- {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"},
- {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"},
- {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"},
- {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"},
- {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"},
- {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"},
- {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"},
+ {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"},
+ {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"},
+ {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"},
+ {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"},
+ {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"},
+ {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"},
+ {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"},
+ {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"},
+ {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"},
+ {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"},
+ {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"},
+ {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"},
+ {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"},
+ {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"},
+ {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"},
+ {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+fpx = ["olefile"]
+mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+typing = ["typing-extensions"]
+xmp = ["defusedxml"]
[[package]]
name = "pkginfo"
@@ -2475,13 +2493,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.35.0"
+version = "1.39.1"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = "*"
files = [
- {file = "sentry-sdk-1.35.0.tar.gz", hash = "sha256:04e392db9a0d59bd49a51b9e3a92410ac5867556820465057c2ef89a38e953e9"},
- {file = "sentry_sdk-1.35.0-py2.py3-none-any.whl", hash = "sha256:a7865952701e46d38b41315c16c075367675c48d049b90a4cc2e41991ebc7efa"},
+ {file = "sentry-sdk-1.39.1.tar.gz", hash = "sha256:320a55cdf9da9097a0bead239c35b7e61f53660ef9878861824fd6d9b2eaf3b5"},
+ {file = "sentry_sdk-1.39.1-py2.py3-none-any.whl", hash = "sha256:81b5b9ffdd1a374e9eb0c053b5d2012155db9cbe76393a8585677b753bd5fdc1"},
]
[package.dependencies]
@@ -3037,24 +3055,24 @@ files = [
[[package]]
name = "types-commonmark"
-version = "0.9.2.4"
+version = "0.9.2.20240106"
description = "Typing stubs for commonmark"
optional = false
-python-versions = "*"
+python-versions = ">=3.8"
files = [
- {file = "types-commonmark-0.9.2.4.tar.gz", hash = "sha256:2c6486f65735cf18215cca3e962b17787fa545be279306f79b801f64a5319959"},
- {file = "types_commonmark-0.9.2.4-py3-none-any.whl", hash = "sha256:d5090fa685c3e3c0ec3a5973ff842000baef6d86f762d52209b3c5e9fbd0b555"},
+ {file = "types-commonmark-0.9.2.20240106.tar.gz", hash = "sha256:52a062b71766d6ab258fca2d8e19fb0853796e25ca9afa9d0f67a1e42c93479f"},
+ {file = "types_commonmark-0.9.2.20240106-py3-none-any.whl", hash = "sha256:606d9de1e3a96cab0b1c0b6cccf4df099116148d1d864d115fde2e27ad6877c3"},
]
[[package]]
name = "types-jsonschema"
-version = "4.20.0.0"
+version = "4.20.0.20240105"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
files = [
- {file = "types-jsonschema-4.20.0.0.tar.gz", hash = "sha256:0de1032d243f1d3dba8b745ad84efe8c1af71665a9deb1827636ac535dcb79c1"},
- {file = "types_jsonschema-4.20.0.0-py3-none-any.whl", hash = "sha256:e6d5df18aaca4412f0aae246a294761a92040e93d7bc840f002b7329a8b72d26"},
+ {file = "types-jsonschema-4.20.0.20240105.tar.gz", hash = "sha256:4a71af7e904498e7ad055149f6dc1eee04153b59a99ad7dd17aa3769c9bc5982"},
+ {file = "types_jsonschema-4.20.0.20240105-py3-none-any.whl", hash = "sha256:26706cd70a273e59e718074c4e756608a25ba61327a7f9a4493ebd11941e5ad4"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index 2ac03edfe8..24791aa0a9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
-version = "1.98.0"
+version = "1.99.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 07ca16c94c..8f9cdbddbb 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -299,6 +299,19 @@ def _parse_oidc_config_dict(
config_path + ("client_secret",),
)
+ # If no client secret is specified, then the auth method must be "none"
+ client_auth_method = oidc_config.get("client_auth_method")
+ if client_secret is None and client_secret_jwt_key is None:
+ if client_auth_method is None:
+ client_auth_method = "none"
+ elif client_auth_method != "none":
+ raise ConfigError(
+ "No 'client_secret' is set in OIDC config, and 'client_auth_method' is not set to 'none'"
+ )
+
+ if client_auth_method is None:
+ client_auth_method = "client_secret_basic"
+
return OidcProviderConfig(
idp_id=idp_id,
idp_name=oidc_config.get("idp_name", "OIDC"),
@@ -309,7 +322,7 @@ def _parse_oidc_config_dict(
client_id=oidc_config["client_id"],
client_secret=client_secret,
client_secret_jwt_key=client_secret_jwt_key,
- client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
+ client_auth_method=client_auth_method,
pkce_method=oidc_config.get("pkce_method", "auto"),
scopes=oidc_config.get("scopes", ["openid"]),
authorization_endpoint=oidc_config.get("authorization_endpoint"),
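The hunk above changes the default for `client_auth_method`: previously it unconditionally fell back to `client_secret_basic`; now, when neither `client_secret` nor `client_secret_jwt_key` is configured, it defaults to `none` and rejects any other explicit value. A minimal standalone sketch of that defaulting rule (the helper name and the bare `ConfigError` stand-in are illustrative, not Synapse's actual structure):

```python
from typing import Optional


class ConfigError(Exception):
    """Stand-in for synapse.config._base.ConfigError."""


def resolve_client_auth_method(
    client_secret: Optional[str],
    client_secret_jwt_key: Optional[str],
    client_auth_method: Optional[str],
) -> str:
    # Without any secret material, only the "none" auth method is valid.
    if client_secret is None and client_secret_jwt_key is None:
        if client_auth_method is None:
            return "none"
        if client_auth_method != "none":
            raise ConfigError(
                "No 'client_secret' is set in OIDC config, and "
                "'client_auth_method' is not set to 'none'"
            )
        return client_auth_method
    # A secret is available: keep the historical default.
    return client_auth_method or "client_secret_basic"


assert resolve_client_auth_method(None, None, None) == "none"
assert resolve_client_auth_method("s3cret", None, None) == "client_secret_basic"
```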
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 882be905db..12837429b9 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -94,7 +94,7 @@ from synapse.types import (
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
-from synapse.util.iterutils import batch_iter, partition, sorted_topologically_batched
+from synapse.util.iterutils import batch_iter, partition, sorted_topologically
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -1141,16 +1141,8 @@ class FederationEventHandler:
partial_state_flags = await self._store.get_partial_state_events(seen)
partial_state = any(partial_state_flags.values())
- # Get the state of the events we know about
- ours = await self._state_storage_controller.get_state_groups_ids(
- room_id, seen, await_full_state=False
- )
-
# state_maps is a list of mappings from (type, state_key) to event_id
- state_maps: List[StateMap[str]] = list(ours.values())
-
- # we don't need this any more, let's delete it.
- del ours
+ state_maps: List[StateMap[str]] = []
# Ask the remote server for the states we don't
# know about
@@ -1169,6 +1161,17 @@ class FederationEventHandler:
state_maps.append(remote_state_map)
+ # Get the state of the events we know about. We do this *after*
+ # trying to fetch missing state over federation as that might fail
+ # and then we can skip loading the local state.
+ ours = await self._state_storage_controller.get_state_groups_ids(
+ room_id, seen, await_full_state=False
+ )
+ state_maps.extend(ours.values())
+
+ # we don't need this any more, let's delete it.
+ del ours
+
room_version = await self._store.get_room_version_id(room_id)
state_map = await self._state_resolution_handler.resolve_events_with_store(
room_id,
@@ -1678,57 +1681,36 @@ class FederationEventHandler:
# We need to persist an event's auth events before the event.
auth_graph = {
- ev: [event_map[e_id] for e_id in ev.auth_event_ids() if e_id in event_map]
+ ev.event_id: [e_id for e_id in ev.auth_event_ids() if e_id in event_map]
for ev in event_map.values()
}
- for roots in sorted_topologically_batched(event_map.values(), auth_graph):
- if not roots:
- # if *none* of the remaining events are ready, that means
- # we have a loop. This either means a bug in our logic, or that
- # somebody has managed to create a loop (which requires finding a
- # hash collision in room v2 and later).
- logger.warning(
- "Loop found in auth events while fetching missing state/auth "
- "events: %s",
- shortstr(event_map.keys()),
- )
- return
-
- logger.info(
- "Persisting %i of %i remaining outliers: %s",
- len(roots),
- len(event_map),
- shortstr(e.event_id for e in roots),
- )
-
- await self._auth_and_persist_outliers_inner(room_id, roots)
-
- async def _auth_and_persist_outliers_inner(
- self, room_id: str, fetched_events: Collection[EventBase]
- ) -> None:
- """Helper for _auth_and_persist_outliers
-
- Persists a batch of events where we have (theoretically) already persisted all
- of their auth events.
-
- Marks the events as outliers, auths them, persists them to the database, and,
- where appropriate (eg, an invite), awakes the notifier.
+ sorted_auth_event_ids = sorted_topologically(event_map.keys(), auth_graph)
+ sorted_auth_events = [event_map[e_id] for e_id in sorted_auth_event_ids]
+ logger.info(
+ "Persisting %i remaining outliers: %s",
+ len(sorted_auth_events),
+ shortstr(e.event_id for e in sorted_auth_events),
+ )
- Params:
- origin: where the events came from
- room_id: the room that the events are meant to be in (though this has
- not yet been checked)
- fetched_events: the events to persist
- """
# get all the auth events for all the events in this batch. By now, they should
# have been persisted.
- auth_events = {
- aid for event in fetched_events for aid in event.auth_event_ids()
+ auth_event_ids = {
+ aid for event in sorted_auth_events for aid in event.auth_event_ids()
}
- persisted_events = await self._store.get_events(
- auth_events,
- allow_rejected=True,
- )
+ auth_map = {
+ ev.event_id: ev
+ for ev in sorted_auth_events
+ if ev.event_id in auth_event_ids
+ }
+
+ missing_events = auth_event_ids.difference(auth_map)
+ if missing_events:
+ persisted_events = await self._store.get_events(
+ missing_events,
+ allow_rejected=True,
+ redact_behaviour=EventRedactBehaviour.as_is,
+ )
+ auth_map.update(persisted_events)
events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = []
@@ -1736,7 +1718,7 @@ class FederationEventHandler:
with nested_logging_context(suffix=event.event_id):
auth = []
for auth_event_id in event.auth_event_ids():
- ae = persisted_events.get(auth_event_id)
+ ae = auth_map.get(auth_event_id)
if not ae:
# the fact we can't find the auth event doesn't mean it doesn't
# exist, which means it is premature to reject `event`. Instead we
@@ -1755,7 +1737,9 @@ class FederationEventHandler:
context = EventContext.for_outlier(self._storage_controllers)
try:
validate_event_for_room_version(event)
- await check_state_independent_auth_rules(self._store, event)
+ await check_state_independent_auth_rules(
+ self._store, event, batched_auth_events=auth_map
+ )
check_state_dependent_auth_rules(event, auth)
except AuthError as e:
logger.warning("Rejecting %r because %s", event, e)
@@ -1772,7 +1756,7 @@ class FederationEventHandler:
events_and_contexts_to_persist.append((event, context))
- for event in fetched_events:
+ for event in sorted_auth_events:
await prep(event)
await self.persist_events_and_notify(
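This refactor collapses the old batched persistence loop into a single topological pass: `sorted_topologically` (from `synapse.util.iterutils`) yields each event only after the events that must precede it, so the auth events of every outlier are persisted first. As a rough stand-in, Python's stdlib `graphlib` illustrates the same ordering contract on a toy auth graph (event IDs hypothetical; note the removed code's explicit loop-detection warning is gone, with cycle handling left to the sort helper):

```python
from graphlib import TopologicalSorter  # stdlib since Python 3.9

# Same shape as `auth_graph` above: event ID -> auth event IDs that must
# be persisted before it.
auth_graph = {
    "$create": [],
    "$power_levels": ["$create"],
    "$member_alice": ["$create", "$power_levels"],
    "$message": ["$create", "$power_levels", "$member_alice"],
}

# static_order() yields predecessors first, i.e. every event's auth
# events come out before the event itself.
print(list(TopologicalSorter(auth_graph).static_order()))
# ['$create', '$power_levels', '$member_alice', '$message']
```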
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index a534f5f280..78bcac1429 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -44,6 +44,7 @@ from synapse.api.ratelimiting import Ratelimiter
from synapse.config.ratelimiting import RatelimitSettings
from synapse.events import EventBase
from synapse.types import JsonDict, Requester, StrCollection
+from synapse.types.state import StateFilter
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
@@ -546,7 +547,16 @@ class RoomSummaryHandler:
Returns:
True if the room is accessible to the requesting user or server.
"""
- state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
+ event_types = [
+ (EventTypes.JoinRules, ""),
+ (EventTypes.RoomHistoryVisibility, ""),
+ ]
+ if requester:
+ event_types.append((EventTypes.Member, requester))
+
+ state_ids = await self._storage_controllers.state.get_current_state_ids(
+ room_id, state_filter=StateFilter.from_types(event_types)
+ )
# If there's no state for the room, it isn't known.
if not state_ids:
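Instead of loading the room's entire current state, the accessibility check now requests only the entries it actually inspects. A small sketch of the narrowed type list (constants mirror the Matrix event types behind `EventTypes.*`; the function name is illustrative):

```python
from typing import List, Optional, Tuple

JOIN_RULES = "m.room.join_rules"
HISTORY_VISIBILITY = "m.room.history_visibility"
MEMBER = "m.room.member"


def access_check_state_types(requester: Optional[str]) -> List[Tuple[str, str]]:
    # Join rules and history visibility always matter; the requester's own
    # membership matters only when there is a requesting user.
    event_types = [(JOIN_RULES, ""), (HISTORY_VISIBILITY, "")]
    if requester:
        event_types.append((MEMBER, requester))
    return event_types


print(access_check_state_types("@alice:example.org"))
# [('m.room.join_rules', ''), ('m.room.history_visibility', ''),
#  ('m.room.member', '@alice:example.org')]
```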
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0385c04bc2..2e10035772 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -583,10 +583,11 @@ class SyncHandler:
# `recents`, so partial state is only a problem when a membership
# event turns up in `recents` but has not made it into the current
# state.
- current_state_ids_map = (
- await self.store.get_partial_current_state_ids(room_id)
+ current_state_ids = (
+ await self.store.check_if_events_in_current_state(
+ {e.event_id for e in recents if e.is_state()}
+ )
)
- current_state_ids = frozenset(current_state_ids_map.values())
recents = await filter_events_for_client(
self._storage_controllers,
@@ -667,10 +668,11 @@ class SyncHandler:
# `loaded_recents`, so partial state is only a problem when a
# membership event turns up in `loaded_recents` but has not made it
# into the current state.
- current_state_ids_map = (
- await self.store.get_partial_current_state_ids(room_id)
+ current_state_ids = (
+ await self.store.check_if_events_in_current_state(
+ {e.event_id for e in loaded_recents if e.is_state()}
+ )
)
- current_state_ids = frozenset(current_state_ids_map.values())
loaded_recents = await filter_events_for_client(
self._storage_controllers,
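Both hunks replace a fetch of the room's full current-state map with a reverse lookup: hand the store the timeline's state event IDs and get back the subset that is still current. A toy model of the difference, with a plain set standing in for the `current_state_events` table:

from typing import AbstractSet, FrozenSet

# Stand-in for the `current_state_events` table.
CURRENT_STATE_EVENT_IDS = {"$create", "$power_levels", "$alice_join"}


def check_if_events_in_current_state(event_ids: AbstractSet[str]) -> FrozenSet[str]:
    # Toy equivalent of the new store method: intersect, rather than
    # returning the whole (potentially huge) current-state map.
    return frozenset(event_ids & CURRENT_STATE_EVENT_IDS)


current_state_ids = check_if_events_in_current_state({"$alice_join", "$old_topic"})
assert current_state_ids == frozenset({"$alice_join"})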
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 03ce0b4dc6..cce9583fa7 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -28,17 +28,11 @@ from synapse.storage.databases.main import DataStore
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
invites = await store.get_invited_rooms_for_local_user(user_id)
- joins = await store.get_rooms_for_user(user_id)
badge = len(invites)
room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
- for room_id, notify_count in room_to_count.items():
- # room_to_count may include rooms which the user has left,
- # ignore those.
- if room_id not in joins:
- continue
-
+ for _room_id, notify_count in room_to_count.items():
if notify_count == 0:
continue
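The joined-rooms filter can be dropped because the reworked `get_unread_counts_by_room_for_user` (see the event_push_actions.py hunks below) now joins on `local_current_membership`, so rooms the user has left never reach this loop. What remains of the badge calculation, as a self-contained sketch (the `group_by_room` branch follows the function's signature above; treat the exact body as an illustration):

from typing import Dict


def badge_from_counts(
    invite_count: int, room_to_count: Dict[str, int], group_by_room: bool
) -> int:
    # Sketch of get_badge_count once membership filtering moved into SQL.
    badge = invite_count
    for notify_count in room_to_count.values():
        if notify_count == 0:
            continue
        # Count each room once, or sum every unread notification.
        badge += 1 if group_by_room else notify_count
    return badge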
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 4bbcf7199c..5a1a3e8e65 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -245,33 +245,74 @@ class DeviceInboxWorkerStore(SQLBaseStore):
* The last-processed stream ID. Subsequent calls of this function with the
same device should pass this value as 'from_stream_id'.
"""
- (
- user_id_device_id_to_messages,
- last_processed_stream_id,
- ) = await self._get_device_messages(
- user_ids=[user_id],
- device_id=device_id,
- from_stream_id=from_stream_id,
- to_stream_id=to_stream_id,
- limit=limit,
- )
-
- if not user_id_device_id_to_messages:
+ if not self._device_inbox_stream_cache.has_entity_changed(
+ user_id, from_stream_id
+ ):
# There were no messages!
return [], to_stream_id
- # Extract the messages, no need to return the user and device ID again
- to_device_messages = user_id_device_id_to_messages.get((user_id, device_id), [])
+ def get_device_messages_txn(
+ txn: LoggingTransaction,
+ ) -> Tuple[List[JsonDict], int]:
+ sql = """
+ SELECT stream_id, message_json FROM device_inbox
+ WHERE user_id = ? AND device_id = ?
+ AND ? < stream_id AND stream_id <= ?
+ ORDER BY stream_id ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (user_id, device_id, from_stream_id, to_stream_id, limit))
+
+ # Collect the to-device messages for this single device, tracking the
+ # last stream position we processed.
+ last_processed_stream_pos = to_stream_id
+ to_device_messages: List[JsonDict] = []
+ rowcount = 0
+ for row in txn:
+ rowcount += 1
+
+ last_processed_stream_pos = row[0]
+ message_dict = db_to_json(row[1])
+
+ # Store the message itself
+ to_device_messages.append(message_dict)
- return to_device_messages, last_processed_stream_id
+ # start a new span for each message, so that we can tag each separately
+ with start_active_span("get_to_device_message"):
+ set_tag(SynapseTags.TO_DEVICE_TYPE, message_dict["type"])
+ set_tag(SynapseTags.TO_DEVICE_SENDER, message_dict["sender"])
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT, user_id)
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, device_id)
+ set_tag(
+ SynapseTags.TO_DEVICE_MSGID,
+ message_dict["content"].get(EventContentFields.TO_DEVICE_MSGID),
+ )
+
+ if rowcount == limit:
+ # We ended up bumping up against the message limit. There may be more messages
+ # to retrieve. Return what we have, as well as the last stream position that
+ # was processed.
+ #
+ # The caller is expected to set this as the lower (exclusive) bound
+ # for the next query of this device.
+ return to_device_messages, last_processed_stream_pos
+
+ # The limit was not reached, thus we know that to_device_messages
+ # contains all to-device messages for the given device and stream id range.
+ #
+ # We return to_stream_id, which the caller should then provide as the lower
+ # (exclusive) bound on the next query of this device.
+ return to_device_messages, to_stream_id
+
+ return await self.db_pool.runInteraction(
+ "get_messages_for_device", get_device_messages_txn
+ )
async def _get_device_messages(
self,
user_ids: Collection[str],
from_stream_id: int,
to_stream_id: int,
- device_id: Optional[str] = None,
- limit: Optional[int] = None,
) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]:
"""
Retrieve pending to-device messages for a collection of user devices.
@@ -291,11 +332,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
user_ids: The user IDs to filter device messages by.
from_stream_id: The lower boundary of stream id to filter with (exclusive).
to_stream_id: The upper boundary of stream id to filter with (inclusive).
- device_id: A device ID to query to-device messages for. If not provided, to-device
- messages from all device IDs for the given user IDs will be queried. May not be
- provided if `user_ids` contains more than one entry.
- limit: The maximum number of to-device messages to return. Can only be used when
- passing a single user ID / device ID tuple.
+
Returns:
A tuple containing:
@@ -308,30 +345,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
logger.warning("No users provided upon querying for device IDs")
return {}, to_stream_id
- # Prevent a query for one user's device also retrieving another user's device with
- # the same device ID (device IDs are not unique across users).
- if len(user_ids) > 1 and device_id is not None:
- raise AssertionError(
- "Programming error: 'device_id' cannot be supplied to "
- "_get_device_messages when >1 user_id has been provided"
- )
-
- # A limit can only be applied when querying for a single user ID / device ID tuple.
- # See the docstring of this function for more details.
- if limit is not None and device_id is None:
- raise AssertionError(
- "Programming error: _get_device_messages was passed 'limit' "
- "without a specific user_id/device_id"
- )
-
user_ids_to_query: Set[str] = set()
- device_ids_to_query: Set[str] = set()
-
- # Note that a device ID could be an empty str
- if device_id is not None:
- # If a device ID was passed, use it to filter results.
- # Otherwise, device IDs will be derived from the given collection of user IDs.
- device_ids_to_query.add(device_id)
# Determine which users have devices with pending messages
for user_id in user_ids:
@@ -355,20 +369,20 @@ class DeviceInboxWorkerStore(SQLBaseStore):
# hidden devices should not receive to-device messages.
# Note that this is more efficient than just dropping `device_id` from the query,
# since device_inbox has an index on `(user_id, device_id, stream_id)`
- if not device_ids_to_query:
- user_device_dicts = cast(
- List[Tuple[str]],
- self.db_pool.simple_select_many_txn(
- txn,
- table="devices",
- column="user_id",
- iterable=user_ids_to_query,
- keyvalues={"hidden": False},
- retcols=("device_id",),
- ),
- )
- device_ids_to_query.update({row[0] for row in user_device_dicts})
+ user_device_dicts = cast(
+ List[Tuple[str]],
+ self.db_pool.simple_select_many_txn(
+ txn,
+ table="devices",
+ column="user_id",
+ iterable=user_ids_to_query,
+ keyvalues={"hidden": False},
+ retcols=("device_id",),
+ ),
+ )
+
+ device_ids_to_query = {row[0] for row in user_device_dicts}
if not device_ids_to_query:
# We've ended up with no devices to query.
@@ -400,22 +414,15 @@ class DeviceInboxWorkerStore(SQLBaseStore):
to_stream_id,
)
- # If a limit was provided, limit the data retrieved from the database
- if limit is not None:
- sql += "LIMIT ?"
- sql_args += (limit,)
-
txn.execute(sql, sql_args)
# Create and fill a dictionary of (user ID, device ID) -> list of messages
# intended for each device.
- last_processed_stream_pos = to_stream_id
recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {}
rowcount = 0
for row in txn:
rowcount += 1
- last_processed_stream_pos = row[0]
recipient_user_id = row[1]
recipient_device_id = row[2]
message_dict = db_to_json(row[3])
@@ -436,18 +443,6 @@ class DeviceInboxWorkerStore(SQLBaseStore):
message_dict["content"].get(EventContentFields.TO_DEVICE_MSGID),
)
- if limit is not None and rowcount == limit:
- # We ended up bumping up against the message limit. There may be more messages
- # to retrieve. Return what we have, as well as the last stream position that
- # was processed.
- #
- # The caller is expected to set this as the lower (exclusive) bound
- # for the next query of this device.
- return recipient_device_to_messages, last_processed_stream_pos
-
- # The limit was not reached, thus we know that recipient_device_to_messages
- # contains all to-device messages for the given device and stream id range.
- #
# We return to_stream_id, which the caller should then provide as the lower
# (exclusive) bound on the next query of this device.
return recipient_device_to_messages, to_stream_id
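The return contract above is the basis for pagination: a caller feeds the returned stream ID back in as the next exclusive lower bound. A hypothetical driver loop (the outer method's name and parameter order are inferred from the transaction description in the hunk above, so treat the call itself as an assumption):

from typing import Any, Dict, List

JsonDict = Dict[str, Any]


async def drain_device_inbox(
    store: Any,  # stand-in for DeviceInboxWorkerStore
    user_id: str,
    device_id: str,
    up_to_stream_id: int,
    batch_size: int = 100,
) -> List[JsonDict]:
    # Page through a device's pending to-device messages. Each call
    # returns the last processed stream ID, which becomes the exclusive
    # lower bound of the next call, so no message is returned twice.
    messages: List[JsonDict] = []
    from_stream_id = 0
    while True:
        batch, from_stream_id = await store.get_messages_for_device(
            user_id, device_id, from_stream_id, up_to_stream_id, batch_size
        )
        messages.extend(batch)
        if len(batch) < batch_size:
            # The limit was not hit, so the range is exhausted.
            return messages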
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 38029710db..d3859014b6 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -1796,7 +1796,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
device_ids: The IDs of the devices to delete
"""
- def _delete_devices_txn(txn: LoggingTransaction) -> None:
+ def _delete_devices_txn(txn: LoggingTransaction, device_ids: List[str]) -> None:
self.db_pool.simple_delete_many_txn(
txn,
table="devices",
@@ -1813,7 +1813,11 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
keyvalues={"user_id": user_id},
)
- await self.db_pool.runInteraction("delete_devices", _delete_devices_txn)
+ for batch in batch_iter(device_ids, 100):
+ await self.db_pool.runInteraction(
+ "delete_devices", _delete_devices_txn, batch
+ )
+
for device_id in device_ids:
self.device_id_exists_cache.invalidate((user_id, device_id))
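`batch_iter` (a Synapse utility) simply chunks the iterable so each transaction deletes at most 100 devices rather than an unbounded number. A minimal equivalent of that chunking helper (the real one yields tuples):

from itertools import islice
from typing import Iterable, Iterator, List, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[List[T]]:
    # Yield successive chunks of at most `size` items.
    it = iter(iterable)
    while chunk := list(islice(it, size)):
        yield chunk


# Deleting 250 devices now runs as three bounded transactions:
assert [len(b) for b in batch_iter(range(250), 100)] == [100, 100, 50]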
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 650b8c8135..6d4e2942ea 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -357,10 +357,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
This function is intentionally not cached because it is called to calculate the
unread badge for push notifications and thus the result is expected to change.
- Note that this function assumes the user is a member of the room. Because
- summary rows are not removed when a user leaves a room, the caller must
- filter out those results from the result.
-
Returns:
A map of room ID to notification counts for the given user.
"""
@@ -373,127 +369,170 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
def _get_unread_counts_by_room_for_user_txn(
self, txn: LoggingTransaction, user_id: str
) -> Dict[str, int]:
- receipt_types_clause, args = make_in_list_sql_clause(
+ # To get the badge count of all rooms we need to make three queries:
+ # 1. Fetch all counts from `event_push_summary`, discarding any stale
+ # rooms.
+ # 2. Fetch all notifications from `event_push_actions` that haven't
+ # been rotated yet.
+ # 3. Fetch all notifications from `event_push_actions` for the stale
+ # rooms.
+ #
+ # The "stale room" scenario generally happens when there is a new read
+ # receipt that hasn't yet been processed to update the
+ # `event_push_summary` table. When that happens we ignore the
+ # `event_push_summary` table for that room and calculate the count
+ # manually from `event_push_actions`.
+
+ # We only need to take read receipts of these types into account.
+ receipt_types_clause, receipt_types_args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
)
- args.extend([user_id, user_id])
-
- receipts_cte = f"""
- WITH all_receipts AS (
- SELECT room_id, thread_id, MAX(event_stream_ordering) AS max_receipt_stream_ordering
- FROM receipts_linearized
- LEFT JOIN events USING (room_id, event_id)
- WHERE
- {receipt_types_clause}
- AND user_id = ?
- GROUP BY room_id, thread_id
- )
- """
-
- receipts_joins = """
- LEFT JOIN (
- SELECT room_id, thread_id,
- max_receipt_stream_ordering AS threaded_receipt_stream_ordering
- FROM all_receipts
- WHERE thread_id IS NOT NULL
- ) AS threaded_receipts USING (room_id, thread_id)
- LEFT JOIN (
- SELECT room_id, thread_id,
- max_receipt_stream_ordering AS unthreaded_receipt_stream_ordering
- FROM all_receipts
- WHERE thread_id IS NULL
- ) AS unthreaded_receipts USING (room_id)
- """
-
- # First get summary counts by room / thread for the user. We use the max receipt
- # stream ordering of both threaded & unthreaded receipts to compare against the
- # summary table.
- #
- # PostgreSQL and SQLite differ in comparing scalar numerics.
- if isinstance(self.database_engine, PostgresEngine):
- # GREATEST ignores NULLs.
- max_clause = """GREATEST(
- threaded_receipt_stream_ordering,
- unthreaded_receipt_stream_ordering
- )"""
- else:
- # MAX returns NULL if any are NULL, so COALESCE to 0 first.
- max_clause = """MAX(
- COALESCE(threaded_receipt_stream_ordering, 0),
- COALESCE(unthreaded_receipt_stream_ordering, 0)
- )"""
+ # Step 1, fetch all counts from `event_push_summary` for the user. This
+ # is slightly convoluted as we also need to pull out the stream ordering
+ # of the user's most recent receipt in the room (either a thread-aware
+ # or a thread-unaware receipt) in order to determine whether the row in
+ # `event_push_summary` is stale. Hence the outer GROUP BY and odd join
+ # condition against `receipts_linearized`.
sql = f"""
- {receipts_cte}
- SELECT eps.room_id, eps.thread_id, notif_count
- FROM event_push_summary AS eps
- {receipts_joins}
- WHERE user_id = ?
- AND notif_count != 0
- AND (
- (last_receipt_stream_ordering IS NULL AND stream_ordering > {max_clause})
- OR last_receipt_stream_ordering = {max_clause}
+ SELECT room_id, notif_count, stream_ordering, thread_id, last_receipt_stream_ordering,
+ MAX(receipt_stream_ordering)
+ FROM (
+ SELECT e.room_id, notif_count, e.stream_ordering, e.thread_id, last_receipt_stream_ordering,
+ ev.stream_ordering AS receipt_stream_ordering
+ FROM event_push_summary AS e
+ INNER JOIN local_current_membership USING (user_id, room_id)
+ LEFT JOIN receipts_linearized AS r ON (
+ e.user_id = r.user_id
+ AND e.room_id = r.room_id
+ AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
+ AND {receipt_types_clause}
)
+ LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
+ WHERE e.user_id = ? AND notif_count > 0
+ ) AS es
+ GROUP BY room_id, notif_count, stream_ordering, thread_id, last_receipt_stream_ordering
"""
- txn.execute(sql, args)
-
- seen_thread_ids = set()
- room_to_count: Dict[str, int] = defaultdict(int)
- for room_id, thread_id, notif_count in txn:
- room_to_count[room_id] += notif_count
- seen_thread_ids.add(thread_id)
+ txn.execute(
+ sql,
+ receipt_types_args
+ + [
+ user_id,
+ ],
+ )
- # Now get any event push actions that haven't been rotated using the same OR
- # join and filter by receipt and event push summary rotated up to stream ordering.
- sql = f"""
- {receipts_cte}
- SELECT epa.room_id, epa.thread_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
- FROM event_push_actions AS epa
- {receipts_joins}
- WHERE user_id = ?
- AND epa.notif = 1
- AND stream_ordering > (SELECT stream_ordering FROM event_push_summary_stream_ordering)
- AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
- AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
- GROUP BY epa.room_id, epa.thread_id
- """
- txn.execute(sql, args)
+ room_to_count: Dict[str, int] = defaultdict(int)
+ stale_room_ids = set()
+ for row in txn:
+ room_id = row[0]
+ notif_count = row[1]
+ stream_ordering = row[2]
+ _thread_id = row[3]
+ last_receipt_stream_ordering = row[4]
+ receipt_stream_ordering = row[5]
+
+ if last_receipt_stream_ordering is None:
+ if receipt_stream_ordering is None:
+ room_to_count[room_id] += notif_count
+ elif stream_ordering > receipt_stream_ordering:
+ room_to_count[room_id] += notif_count
+ else:
+ # The latest read receipt from the user is after all the rows for
+ # this room in `event_push_summary`. We ignore them, and
+ # calculate the count from `event_push_actions` in step 3.
+ pass
+ elif last_receipt_stream_ordering == receipt_stream_ordering:
+ room_to_count[room_id] += notif_count
+ else:
+ # The row is stale if `last_receipt_stream_ordering` is set and
+ # *doesn't* match the latest receipt from the user.
+ stale_room_ids.add(room_id)
- for room_id, thread_id, notif_count in txn:
- # Note: only count push actions we have valid summaries for with up to date receipt.
- if thread_id not in seen_thread_ids:
- continue
- room_to_count[room_id] += notif_count
+ # Discard any stale rooms from `room_to_count`, as we will recalculate
+ # them in step 3.
+ for room_id in stale_room_ids:
+ room_to_count.pop(room_id, None)
- thread_id_clause, thread_ids_args = make_in_list_sql_clause(
- self.database_engine, "epa.thread_id", seen_thread_ids
+ # Step 2, basically the same query, except against `event_push_actions`
+ # and only fetching rows inserted since the last rotation.
+ rotated_upto_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="event_push_summary_stream_ordering",
+ keyvalues={},
+ retcol="stream_ordering",
)
- # Finally re-check event_push_actions for any rooms not in the summary, ignoring
- # the rotated up-to position. This handles the case where a read receipt has arrived
- # but not been rotated meaning the summary table is out of date, so we go back to
- # the push actions table.
sql = f"""
- {receipts_cte}
- SELECT epa.room_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
- FROM event_push_actions AS epa
- {receipts_joins}
- WHERE user_id = ?
- AND NOT {thread_id_clause}
- AND epa.notif = 1
- AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
- AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
- GROUP BY epa.room_id
+ SELECT room_id, thread_id
+ FROM (
+ SELECT e.room_id, e.stream_ordering, e.thread_id,
+ ev.stream_ordering AS receipt_stream_ordering
+ FROM event_push_actions AS e
+ INNER JOIN local_current_membership USING (user_id, room_id)
+ LEFT JOIN receipts_linearized AS r ON (
+ e.user_id = r.user_id
+ AND e.room_id = r.room_id
+ AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
+ AND {receipt_types_clause}
+ )
+ LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
+ WHERE e.user_id = ? AND notif > 0
+ AND e.stream_ordering > ?
+ ) AS es
+ GROUP BY room_id, stream_ordering, thread_id
+ HAVING stream_ordering > COALESCE(MAX(receipt_stream_ordering), 0)
"""
- args.extend(thread_ids_args)
- txn.execute(sql, args)
+ txn.execute(
+ sql,
+ receipt_types_args + [user_id, rotated_upto_stream_ordering],
+ )
+ for room_id, _thread_id in txn:
+ # Again, we ignore any stale rooms.
+ if room_id not in stale_room_ids:
+ # For event push actions it is one notification per row.
+ room_to_count[room_id] += 1
+
+ # Step 3, if we have stale rooms then we need to recalculate the counts
+ # from `event_push_actions`. Again, this is basically the same query as
+ # above except without a lower bound on stream ordering and only against
+ # a specific set of rooms.
+ if stale_room_ids:
+ room_id_clause, room_id_args = make_in_list_sql_clause(
+ self.database_engine,
+ "e.room_id",
+ stale_room_ids,
+ )
- for room_id, notif_count in txn:
- room_to_count[room_id] += notif_count
+ sql = f"""
+ SELECT room_id, thread_id
+ FROM (
+ SELECT e.room_id, e.stream_ordering, e.thread_id,
+ ev.stream_ordering AS receipt_stream_ordering
+ FROM event_push_actions AS e
+ INNER JOIN local_current_membership USING (user_id, room_id)
+ LEFT JOIN receipts_linearized AS r ON (
+ e.user_id = r.user_id
+ AND e.room_id = r.room_id
+ AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
+ AND {receipt_types_clause}
+ )
+ LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
+ WHERE e.user_id = ? AND notif > 0
+ AND {room_id_clause}
+ ) AS es
+ GROUP BY room_id, stream_ordering, thread_id
+ HAVING stream_ordering > COALESCE(MAX(receipt_stream_ordering), 0)
+ """
+ txn.execute(
+ sql,
+ receipt_types_args + [user_id] + room_id_args,
+ )
+ for room_id, _ in txn:
+ room_to_count[room_id] += 1
return room_to_count
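Stripped of the SQL, step 1 reduces to a small merge: trust summary rows that agree with the user's latest receipt, and flag the rest as stale so steps 2 and 3 can recount them from `event_push_actions`. A condensed model of that merge over plain tuples (field order mirrors the SELECT above):

from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple

# (room_id, notif_count, stream_ordering, last_receipt_stream_ordering,
#  receipt_stream_ordering)
SummaryRow = Tuple[str, int, int, Optional[int], Optional[int]]


def merge_summary_rows(rows: List[SummaryRow]) -> Tuple[Dict[str, int], Set[str]]:
    room_to_count: Dict[str, int] = defaultdict(int)
    stale_room_ids: Set[str] = set()
    for room_id, notif_count, stream_ordering, last_receipt_so, receipt_so in rows:
        if last_receipt_so is None:
            # No receipt folded into the summary row: count it unless a
            # newer receipt already covers it.
            if receipt_so is None or stream_ordering > receipt_so:
                room_to_count[room_id] += notif_count
        elif last_receipt_so == receipt_so:
            room_to_count[room_id] += notif_count
        else:
            # Summary built against an older receipt: stale, recount later.
            stale_room_ids.add(room_id)
    # Stale rooms are recounted wholesale, so drop their partial counts.
    for room_id in stale_room_ids:
        room_to_count.pop(room_id, None)
    return dict(room_to_count), stale_room_ids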
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 4700e74ad2..8006046453 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -24,13 +24,17 @@ from typing import (
Any,
Collection,
Dict,
+ FrozenSet,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
+ TypeVar,
+ Union,
cast,
+ overload,
)
import attr
@@ -52,7 +56,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.types import JsonDict, JsonMapping, StateMap
+from synapse.types import JsonDict, JsonMapping, StateKey, StateMap, StrCollection
from synapse.types.state import StateFilter
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
@@ -64,6 +68,8 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+_T = TypeVar("_T")
+
MAX_STATE_DELTA_HOPS = 100
@@ -318,6 +324,20 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
"get_partial_current_state_ids", _get_current_state_ids_txn
)
+ async def check_if_events_in_current_state(
+ self, event_ids: StrCollection
+ ) -> FrozenSet[str]:
+ """Checks and returns which of the given events is part of the current state."""
+ rows = await self.db_pool.simple_select_many_batch(
+ table="current_state_events",
+ column="event_id",
+ iterable=event_ids,
+ retcols=("event_id",),
+ desc="check_if_events_in_current_state",
+ )
+
+ return frozenset(event_id for (event_id,) in rows)
+
# FIXME: how should this be cached?
@cancellable
async def get_partial_filtered_current_state_ids(
@@ -349,7 +369,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
def _get_filtered_current_state_ids_txn(
txn: LoggingTransaction,
) -> StateMap[str]:
- results = {}
+ results = StateMapWrapper(state_filter=state_filter or StateFilter.all())
+
sql = """
SELECT type, state_key, event_id FROM current_state_events
WHERE room_id = ?
@@ -726,3 +747,41 @@ class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
+
+
+@attr.s(auto_attribs=True, slots=True)
+class StateMapWrapper(Dict[StateKey, str]):
+ """A wrapper around a StateMap[str] to ensure that we only query for items
+ that were not filtered out.
+
+ This is to help prevent bugs where we filter out state but other bits of the
+ code expect the state to be there.
+ """
+
+ state_filter: StateFilter
+
+ def __getitem__(self, key: StateKey) -> str:
+ if key not in self.state_filter:
+ raise Exception(f"State map was filtered and doesn't include: {key}")
+ return super().__getitem__(key)
+
+ @overload
+ def get(self, key: Tuple[str, str]) -> Optional[str]:
+ ...
+
+ @overload
+ def get(self, key: Tuple[str, str], default: Union[str, _T]) -> Union[str, _T]:
+ ...
+
+ def get(
+ self, key: StateKey, default: Union[str, _T, None] = None
+ ) -> Union[str, _T, None]:
+ if key not in self.state_filter:
+ raise Exception(f"State map was filtered and doesn't include: {key}")
+ return super().get(key, default)
+
+ def __contains__(self, key: Any) -> bool:
+ if key not in self.state_filter:
+ raise Exception(f"State map was filtered and doesn't include: {key}")
+
+ return super().__contains__(key)
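The point of the wrapper is that an over-narrow `StateFilter` now fails loudly instead of silently looking like missing state. A hypothetical session, assuming `StateFilter` and `StateMapWrapper` behave as defined above:

state_filter = StateFilter.from_types([("m.room.join_rules", "")])
state_map = StateMapWrapper(state_filter=state_filter)
state_map[("m.room.join_rules", "")] = "$join_rules_event_id"

state_map[("m.room.join_rules", "")]              # -> "$join_rules_event_id"
("m.room.join_rules", "") in state_map            # -> True
state_map.get(("m.room.history_visibility", ""))  # raises: key was filtered out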
diff --git a/synapse/types/state.py b/synapse/types/state.py
index 5ca3c94bce..53662372af 100644
--- a/synapse/types/state.py
+++ b/synapse/types/state.py
@@ -20,6 +20,7 @@
import logging
from typing import (
TYPE_CHECKING,
+ Any,
Callable,
Collection,
Dict,
@@ -584,6 +585,29 @@ class StateFilter:
# local users only
return False
+ def __contains__(self, key: Any) -> bool:
+ if not isinstance(key, tuple) or len(key) != 2:
+ raise TypeError(
+ f"'in StateFilter' requires (str, str) as left operand, not {type(key).__name__}"
+ )
+
+ typ, state_key = key
+
+ if not isinstance(typ, str) or not isinstance(state_key, str):
+ raise TypeError(
+ f"'in StateFilter' requires (str, str) as left operand, not ({type(typ).__name__}, {type(state_key).__name__})"
+ )
+
+ if typ in self.types:
+ state_keys = self.types[typ]
+ if state_keys is None or state_key in state_keys:
+ return True
+
+ elif self.include_others:
+ return True
+
+ return False
+
_ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True)
_ALL_NON_MEMBER_STATE_FILTER = StateFilter(
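A few concrete cases for the new membership test; behaviour follows the branches above, with `from_types` producing a filter whose `include_others` is False:

f = StateFilter.from_types([("m.room.member", "@alice:example.org")])

("m.room.member", "@alice:example.org") in f  # True: exact match
("m.room.member", "@bob:example.org") in f    # False: different state key
("m.room.topic", "") in f                     # False: include_others is False
("m.room.topic", "") in StateFilter.all()     # True: the catch-all filter
"m.room.member" in f                          # raises TypeError: needs (str, str)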