diff --git a/.dockerignore b/.dockerignore
index 5670b8c15b..1c6905b1bb 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -8,6 +8,7 @@
!README.rst
!pyproject.toml
!poetry.lock
+!Cargo.lock
!build_rust.py
rust/target
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 8366ac9393..9a708286a4 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -201,10 +201,11 @@ jobs:
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
- # TODO: should mypy be included here? It feels more brittle than the other two.
+ # TODO: should mypy be included here? It feels more brittle than the others.
- mypy
- trial
- sytest
+ - complement
runs-on: ubuntu-latest
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 91a080cca0..9fe61930a5 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -94,7 +94,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -112,7 +112,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2
@@ -204,7 +204,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
@@ -320,7 +320,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
@@ -452,7 +452,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
@@ -478,7 +478,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
- toolchain: 1.61.0
+ toolchain: 1.58.1
override: true
- uses: Swatinem/rust-cache@v2
diff --git a/.gitignore b/.gitignore
index 31a60bb7bd..15fbfdddf1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,8 +15,9 @@ _trial_temp*/
.DS_Store
__pycache__/
-# We do want the poetry lockfile.
+# We do want the poetry and cargo lockfiles.
!poetry.lock
+!Cargo.lock
# stuff that is likely to exist when you run a server locally
/*.db
diff --git a/CHANGES.md b/CHANGES.md
index fb91bc5f20..82b5526f94 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,4 +1,4 @@
-Synapse 1.68.0rc1 (2022-09-20)
+Synapse 1.68.0rc2 (2022-09-23)
==============================
Please note that Synapse will now refuse to start if configured to use a version of SQLite earlier than 3.27.
@@ -8,6 +8,23 @@ Those using packages will not be affected. On most platforms, installing with `p
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1670).
+Bugfixes
+--------
+
+- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
+
+
+Internal Changes
+----------------
+
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
+- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
+
+
+Synapse 1.68.0rc1 (2022-09-20)
+==============================
+
Features
--------
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000..b952b6b4c0
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,466 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.65"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+
+[[package]]
+name = "arc-swap"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "blake2"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "indoc"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
+
+[[package]]
+name = "itoa"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.132"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
+
+[[package]]
+name = "lock_api"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e"
+
+[[package]]
+name = "parking_lot"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "windows-sys",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "pyo3"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12f72538a0230791398a0986a6518ebd88abc3fded89007b506ed072acc831e1"
+dependencies = [
+ "anyhow",
+ "cfg-if",
+ "indoc",
+ "libc",
+ "memoffset",
+ "parking_lot",
+ "pyo3-build-config",
+ "pyo3-ffi",
+ "pyo3-macros",
+ "unindent",
+]
+
+[[package]]
+name = "pyo3-build-config"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc4cf18c20f4f09995f3554e6bcf9b09bd5e4d6b67c562fdfaafa644526ba479"
+dependencies = [
+ "once_cell",
+ "target-lexicon",
+]
+
+[[package]]
+name = "pyo3-ffi"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a41877f28d8ebd600b6aa21a17b40c3b0fc4dfe73a27b6e81ab3d895e401b0e9"
+dependencies = [
+ "libc",
+ "pyo3-build-config",
+]
+
+[[package]]
+name = "pyo3-log"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
+dependencies = [
+ "arc-swap",
+ "log",
+ "pyo3",
+]
+
+[[package]]
+name = "pyo3-macros"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e81c8d4bcc2f216dc1b665412df35e46d12ee8d3d046b381aad05f1fcf30547"
+dependencies = [
+ "proc-macro2",
+ "pyo3-macros-backend",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pyo3-macros-backend"
+version = "0.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85752a767ee19399a78272cc2ab625cd7d373b2e112b4b13db28de71fa892784"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pythonize"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
+dependencies = [
+ "pyo3",
+ "serde",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+
+[[package]]
+name = "ryu"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "serde"
+version = "1.0.145"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.145"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.85"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
+
+[[package]]
+name = "subtle"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
+
+[[package]]
+name = "syn"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "synapse"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "blake2",
+ "hex",
+ "lazy_static",
+ "log",
+ "pyo3",
+ "pyo3-log",
+ "pythonize",
+ "regex",
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1"
+
+[[package]]
+name = "typenum"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
+
+[[package]]
+name = "unindent"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58ee9362deb4a96cef4d437d1ad49cffc9b9e92d202b6995674e928ce684f112"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
diff --git a/changelog.d/13635.feature b/changelog.d/13635.feature
new file mode 100644
index 0000000000..d86bf7ed80
--- /dev/null
+++ b/changelog.d/13635.feature
@@ -0,0 +1 @@
+Exponentially backoff from backfilling the same event over and over.
diff --git a/changelog.d/13667.feature b/changelog.d/13667.feature
new file mode 100644
index 0000000000..a0b3cfe18c
--- /dev/null
+++ b/changelog.d/13667.feature
@@ -0,0 +1 @@
+Add cache invalidation across workers to module API.
diff --git a/changelog.d/13722.feature b/changelog.d/13722.feature
new file mode 100644
index 0000000000..588d143c0f
--- /dev/null
+++ b/changelog.d/13722.feature
@@ -0,0 +1 @@
+Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session.
diff --git a/changelog.d/13772.doc b/changelog.d/13772.doc
new file mode 100644
index 0000000000..3398ff3765
--- /dev/null
+++ b/changelog.d/13772.doc
@@ -0,0 +1 @@
+Add `worker_main_http_uri` for the worker generator bash script.
diff --git a/changelog.d/13782.feature b/changelog.d/13782.feature
new file mode 100644
index 0000000000..d0cb902dff
--- /dev/null
+++ b/changelog.d/13782.feature
@@ -0,0 +1 @@
+Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13796.misc b/changelog.d/13796.misc
new file mode 100644
index 0000000000..9ed1662394
--- /dev/null
+++ b/changelog.d/13796.misc
@@ -0,0 +1 @@
+Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13799.feature b/changelog.d/13799.feature
new file mode 100644
index 0000000000..6c8e5cffe2
--- /dev/null
+++ b/changelog.d/13799.feature
@@ -0,0 +1 @@
+Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13809.misc b/changelog.d/13809.misc
new file mode 100644
index 0000000000..c2dacca2f2
--- /dev/null
+++ b/changelog.d/13809.misc
@@ -0,0 +1 @@
+Improve the `synapse.api.auth.Auth` mock used in unit tests.
diff --git a/changelog.d/13818.doc b/changelog.d/13818.doc
new file mode 100644
index 0000000000..16b31f5071
--- /dev/null
+++ b/changelog.d/13818.doc
@@ -0,0 +1 @@
+Update URL for the NixOS module for Synapse.
diff --git a/changelog.d/13823.misc b/changelog.d/13823.misc
new file mode 100644
index 0000000000..527d79f4b2
--- /dev/null
+++ b/changelog.d/13823.misc
@@ -0,0 +1 @@
+Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server.
\ No newline at end of file
diff --git a/changelog.d/13830.bugfix b/changelog.d/13830.bugfix
new file mode 100644
index 0000000000..e6215806cd
--- /dev/null
+++ b/changelog.d/13830.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join.
diff --git a/changelog.d/13831.feature b/changelog.d/13831.feature
new file mode 100644
index 0000000000..6c8e5cffe2
--- /dev/null
+++ b/changelog.d/13831.feature
@@ -0,0 +1 @@
+Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13832.feature b/changelog.d/13832.feature
new file mode 100644
index 0000000000..1dc1d66efe
--- /dev/null
+++ b/changelog.d/13832.feature
@@ -0,0 +1 @@
+Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint.
diff --git a/changelog.d/13836.doc b/changelog.d/13836.doc
new file mode 100644
index 0000000000..f2edab00f4
--- /dev/null
+++ b/changelog.d/13836.doc
@@ -0,0 +1 @@
+Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name` not `displayname`.
diff --git a/changelog.d/13840.bugfix b/changelog.d/13840.bugfix
new file mode 100644
index 0000000000..0f014439a8
--- /dev/null
+++ b/changelog.d/13840.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward.
diff --git a/changelog.d/13850.misc b/changelog.d/13850.misc
new file mode 100644
index 0000000000..a973118aaf
--- /dev/null
+++ b/changelog.d/13850.misc
@@ -0,0 +1 @@
+Fix the release script not publishing binary wheels.
\ No newline at end of file
diff --git a/changelog.d/13855.bugfix b/changelog.d/13855.bugfix
new file mode 100644
index 0000000000..5ea8539bd8
--- /dev/null
+++ b/changelog.d/13855.bugfix
@@ -0,0 +1 @@
+Fix access token leak to logs from proxy agent.
diff --git a/changelog.d/13859.misc b/changelog.d/13859.misc
new file mode 100644
index 0000000000..2780a4af3c
--- /dev/null
+++ b/changelog.d/13859.misc
@@ -0,0 +1 @@
+Raise issue if complement fails with latest deps.
diff --git a/changelog.d/13860.feature b/changelog.d/13860.feature
new file mode 100644
index 0000000000..6c8e5cffe2
--- /dev/null
+++ b/changelog.d/13860.feature
@@ -0,0 +1 @@
+Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
diff --git a/changelog.d/13870.doc b/changelog.d/13870.doc
new file mode 100644
index 0000000000..2598bc270c
--- /dev/null
+++ b/changelog.d/13870.doc
@@ -0,0 +1 @@
+Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation.
diff --git a/changelog.d/13873.misc b/changelog.d/13873.misc
new file mode 100644
index 0000000000..f4342482f0
--- /dev/null
+++ b/changelog.d/13873.misc
@@ -0,0 +1 @@
+Create a new snapshot of the database schema.
diff --git a/changelog.d/13874.misc b/changelog.d/13874.misc
new file mode 100644
index 0000000000..499e488c35
--- /dev/null
+++ b/changelog.d/13874.misc
@@ -0,0 +1 @@
+Faster room joins: Send device list updates to most servers in rooms with partial state.
diff --git a/changelog.d/13876.misc b/changelog.d/13876.misc
new file mode 100644
index 0000000000..ef37100115
--- /dev/null
+++ b/changelog.d/13876.misc
@@ -0,0 +1 @@
+Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console.
\ No newline at end of file
diff --git a/changelog.d/13888.misc b/changelog.d/13888.misc
new file mode 100644
index 0000000000..4ffd9bcede
--- /dev/null
+++ b/changelog.d/13888.misc
@@ -0,0 +1 @@
+Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests.
diff --git a/changelog.d/13889.misc b/changelog.d/13889.misc
new file mode 100644
index 0000000000..28bddb7059
--- /dev/null
+++ b/changelog.d/13889.misc
@@ -0,0 +1 @@
+Port push rules to using Rust.
diff --git a/changelog.d/13893.feature b/changelog.d/13893.feature
new file mode 100644
index 0000000000..d0cb902dff
--- /dev/null
+++ b/changelog.d/13893.feature
@@ -0,0 +1 @@
+Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
diff --git a/changelog.d/13905.misc b/changelog.d/13905.misc
new file mode 100644
index 0000000000..efe3bed5f1
--- /dev/null
+++ b/changelog.d/13905.misc
@@ -0,0 +1 @@
+Fix mypy errors with canonicaljson 1.6.3.
diff --git a/changelog.d/13909.bugfix b/changelog.d/13909.bugfix
new file mode 100644
index 0000000000..883dd72919
--- /dev/null
+++ b/changelog.d/13909.bugfix
@@ -0,0 +1 @@
+Fix packaging to include `Cargo.lock` in `sdist`.
diff --git a/changelog.d/13911.doc b/changelog.d/13911.doc
new file mode 100644
index 0000000000..7cc3206501
--- /dev/null
+++ b/changelog.d/13911.doc
@@ -0,0 +1 @@
+Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
\ No newline at end of file
diff --git a/contrib/prometheus/synapse-v2.rules b/contrib/prometheus/synapse-v2.rules
index cbe6f7beba..dde311322f 100644
--- a/contrib/prometheus/synapse-v2.rules
+++ b/contrib/prometheus/synapse-v2.rules
@@ -1,7 +1,12 @@
groups:
- name: synapse
rules:
- # These 3 rules are used in the included Prometheus console
+
+ ###
+ ### Prometheus Console Only
+ ### The following rules are only needed if you use the Prometheus Console
+ ### in contrib/prometheus/consoles/synapse.html
+ ###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
@@ -15,7 +20,6 @@ groups:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
- # These 3 rules are used in the included Prometheus console
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
@@ -29,7 +33,6 @@ groups:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'
- # These 2 rules are used in the included Prometheus console
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
@@ -38,8 +41,16 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
+ ###
+ ### End of 'Prometheus Console Only' rules block
+ ###
+
- # These 3 rules are used in the included Grafana dashboard
+ ###
+ ### Grafana Only
+ ### The following rules are only needed if you use the Grafana dashboard
+ ### in contrib/grafana/synapse.json
+ ###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
@@ -53,11 +64,11 @@ groups:
labels:
type: bridges
- # This rule is used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
- # This rule is used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
-
+ ###
+ ### End of 'Grafana Only' rules block
+ ###
diff --git a/contrib/workers-bash-scripts/create-multiple-generic-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md
index d303101429..c9be707b3c 100644
--- a/contrib/workers-bash-scripts/create-multiple-generic-workers.md
+++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md
@@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
-cat << EOF >> generic_worker$i.yaml
+cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
@@ -15,6 +15,8 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
+worker_main_http_uri: http://localhost:8008/
+
worker_listeners:
- type: http
port: 808$i
diff --git a/debian/changelog b/debian/changelog
index 6325ce2994..d6c7639775 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,16 @@
+matrix-synapse-py3 (1.69.0~rc1+nmu1) UNRELEASED; urgency=medium
+
+ * The man page for the hash_password script has been updated to reflect
+ the correct default value of 'bcrypt_rounds'.
+
+ -- Synapse Packaging team <packages@matrix.org> Mon, 26 Sep 2022 18:05:09 +0100
+
+matrix-synapse-py3 (1.68.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.68.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 23 Sep 2022 09:40:10 +0100
+
matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
* New Synapse release 1.68.0rc1.
diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn
index eeb354602d..5d0df53802 100644
--- a/debian/hash_password.ronn
+++ b/debian/hash_password.ronn
@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.
It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
-value used for the hashing. By default `bcrypt_rounds` is set to **10**.
+value used for the hashing. By default `bcrypt_rounds` is set to **12**.
The hashed password is written on the `STDOUT`.
diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md
index f6be31b443..dd2830f3a1 100644
--- a/docs/admin_api/register_api.md
+++ b/docs/admin_api/register_api.md
@@ -5,7 +5,7 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
-([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
+([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 96833effc6..dcd8f17c5e 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -181,7 +181,7 @@ doas pkg_add synapse
#### NixOS
Robin Lambertz has packaged Synapse for NixOS at:
-<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
+<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/synapse.nix>
### Installing as a Python module from PyPI
diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index 817499149f..9f5e5fbbe1 100644
--- a/docs/sso_mapping_providers.md
+++ b/docs/sso_mapping_providers.md
@@ -73,8 +73,8 @@ A custom mapping provider must specify the following methods:
* `async def map_user_attributes(self, userinfo, token, failures)`
- This method must be async.
- Arguments:
- - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
- information from.
+ - `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo)
+ object to extract user information from.
- `token` - A dictionary which includes information necessary to make
further requests to the OpenID provider.
- `failures` - An `int` that represents the amount of times the returned
@@ -91,7 +91,13 @@ A custom mapping provider must specify the following methods:
`None`, the user is prompted to pick their own username. This is only used
during a user's first login. Once a localpart has been associated with a
remote user ID (see `get_remote_user_id`) it cannot be updated.
- - `displayname`: An optional string, the display name for the user.
+ - `confirm_localpart`: A boolean. If set to `True`, when a `localpart`
+ string is returned from this method, Synapse will prompt the user to
+ either accept this localpart or pick their own username. Otherwise this
+ option has no effect. If omitted, defaults to `False`.
+ - `display_name`: An optional string, the display name for the user.
+ - `emails`: A list of strings, the email address(es) to associate with
+ this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
- This method must be async.
- Arguments:
diff --git a/poetry.lock b/poetry.lock
index ca82903be6..a106a972f1 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -95,14 +95,15 @@ webencodings = "*"
[[package]]
name = "canonicaljson"
-version = "1.6.0"
+version = "1.6.3"
description = "Canonical JSON"
category = "main"
optional = false
-python-versions = "~=3.7"
+python-versions = ">=3.7"
[package.dependencies]
simplejson = ">=3.14.0"
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.8\""}
[package.extras]
frozendict = ["frozendict (>=1.0)"]
@@ -1737,8 +1738,8 @@ bleach = [
{file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
]
canonicaljson = [
- {file = "canonicaljson-1.6.0-py3-none-any.whl", hash = "sha256:7230c2a2a3db07874f622af84effe41a655e07bf23734830e18a454e65d5b998"},
- {file = "canonicaljson-1.6.0.tar.gz", hash = "sha256:8739d5fd91aca7281d425660ae65af7663808c8177778965f67e90b16a2b2427"},
+ {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
+ {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
diff --git a/pyproject.toml b/pyproject.toml
index d6839d57b4..e669867aa0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.68.0rc1"
+version = "1.68.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -86,8 +86,9 @@ include = [
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
{ path = "Cargo.toml", format = "sdist" },
+ { path = "Cargo.lock", format = "sdist" },
{ path = "rust/Cargo.toml", format = "sdist" },
- { path = "rust/Cargo.lock", format = "sdist" },
+ { path = "rust/build.rs", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
exclude = [
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 8dc5f93ff1..44263bf77e 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
-rust-version = "1.61.0"
+rust-version = "1.58.1"
[lib]
name = "synapse"
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
index d8cd06ee4f..e2bc1640bb 100755
--- a/scripts-dev/make_full_schema.sh
+++ b/scripts-dev/make_full_schema.sh
@@ -26,6 +26,9 @@ usage() {
echo " Defaults to 9999."
echo "-h"
echo " Display this help text."
+ echo ""
+ echo " NB: make sure to run this against the *oldest* supported version of postgres,"
+ echo " or else pg_dump might output non-backwards-compatible syntax."
}
SCHEMA_NUMBER="9999"
@@ -240,25 +243,54 @@ DROP TABLE user_directory_search_stat;
echo "Dumping SQLite3 schema..."
-mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schema/$SCHEMA_NUMBER"
-sqlite3 "$SQLITE_COMMON_DB" ".schema --indent" > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
-sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
-sqlite3 "$SQLITE_MAIN_DB" ".schema --indent" > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
-sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
-sqlite3 "$SQLITE_STATE_DB" ".schema --indent" > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
-sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
+mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schemas/$SCHEMA_NUMBER"
+sqlite3 "$SQLITE_COMMON_DB" ".schema" > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_MAIN_DB" ".schema" > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_STATE_DB" ".schema" > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
+sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
cleanup_pg_schema() {
- sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d'
+ # Cleanup as follows:
+ # - Remove empty lines. pg_dump likes to output a lot of these.
+ # - Remove comment-only lines. pg_dump also likes to output a lot of these to visually
+ # separate tables etc.
+ # - Remove "public." prefix --- the schema name.
+ # - Remove "SET" commands. Last time I ran this, the output commands were
+ # SET statement_timeout = 0;
+ # SET lock_timeout = 0;
+ # SET idle_in_transaction_session_timeout = 0;
+ # SET client_encoding = 'UTF8';
+ # SET standard_conforming_strings = on;
+ # SET check_function_bodies = false;
+ # SET xmloption = content;
+ # SET client_min_messages = warning;
+ # SET row_security = off;
+ # SET default_table_access_method = heap;
+ # - Very carefully remove specific SELECT statements. We CANNOT blanket remove all
+ # SELECT statements because some of those have side-effects which we do want in the
+ # schema. Last time I ran this, the only SELECTS were
+ # SELECT pg_catalog.set_config('search_path', '', false);
+ # and
+ # SELECT pg_catalog.setval(text, bigint, bool);
+ # We do want to remove the former, but the latter is important. If the last argument
+ # is `true` or omitted, this marks the given integer as having been consumed and
+ # will NOT appear as the nextval.
+ sed -e '/^$/d' \
+ -e '/^--/d' \
+ -e 's/public\.//g' \
+ -e '/^SET /d' \
+ -e '/^SELECT pg_catalog.set_config/d'
}
echo "Dumping Postgres schema..."
-pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
-pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
-pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
-pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
-pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
-pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
+pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
echo "Done! Files dumped to: $OUTPUT_DIR"
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index d08517a953..2c377533c0 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -29,7 +29,7 @@ class SynapsePlugin(Plugin):
self, fullname: str
) -> Optional[Callable[[MethodSigContext], CallableType]]:
if fullname.startswith(
- "synapse.util.caches.descriptors._CachedFunction.__call__"
+ "synapse.util.caches.descriptors.CachedFunction.__call__"
) or fullname.startswith(
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
):
@@ -38,7 +38,7 @@ class SynapsePlugin(Plugin):
def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
- """Fixes the `_CachedFunction.__call__` signature to be correct.
+ """Fixes the `CachedFunction.__call__` signature to be correct.
It already has *almost* the correct signature, except:
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 6603bc593b..c82c58c54b 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -427,11 +427,12 @@ def _publish(gh_token: str) -> None:
@cli.command()
-def upload() -> None:
- _upload()
+@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
+def upload(gh_token: Optional[str]) -> None:
+ _upload(gh_token)
-def _upload() -> None:
+def _upload(gh_token: Optional[str]) -> None:
"""Upload release to pypi."""
current_version = get_package_version()
@@ -444,18 +445,40 @@ def _upload() -> None:
click.echo("Tag {tag_name} (tag.commit) is not currently checked out!")
click.get_current_context().abort()
- pypi_asset_names = [
- f"matrix_synapse-{current_version}-py3-none-any.whl",
- f"matrix-synapse-{current_version}.tar.gz",
- ]
+ # Query all the assets corresponding to this release.
+ gh = Github(gh_token)
+ gh_repo = gh.get_repo("matrix-org/synapse")
+ gh_release = gh_repo.get_release(tag_name)
+
+ all_assets = set(gh_release.get_assets())
+
+ # Only accept the wheels and sdist.
+ # Notably: we don't care about debs.tar.xz.
+ asset_names_and_urls = sorted(
+ (asset.name, asset.browser_download_url)
+ for asset in all_assets
+ if asset.name.endswith((".whl", ".tar.gz"))
+ )
+
+ # Print out what we've determined.
+ print("Found relevant assets:")
+ for asset_name, _ in asset_names_and_urls:
+ print(f" - {asset_name}")
+
+ ignored_asset_names = sorted(
+ {asset.name for asset in all_assets}
+ - {asset_name for asset_name, _ in asset_names_and_urls}
+ )
+ print("\nIgnoring irrelevant assets:")
+ for asset_name in ignored_asset_names:
+ print(f" - {asset_name}")
with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir:
- for name in pypi_asset_names:
+ for name, asset_download_url in asset_names_and_urls:
filename = path.join(tmpdir, name)
- url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}"
click.echo(f"Downloading {name} into {filename}")
- urllib.request.urlretrieve(url, filename=filename)
+ urllib.request.urlretrieve(asset_download_url, filename=filename)
if click.confirm("Upload to PyPI?", default=True):
subprocess.run("twine upload *", shell=True, cwd=tmpdir)
@@ -672,7 +695,7 @@ def full(gh_token: str) -> None:
_publish(gh_token)
click.echo("\n*** upload ***")
- _upload()
+ _upload(gh_token)
click.echo("\n*** merge back ***")
_merge_back()
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 30983c47fb..450ba462ba 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -111,6 +111,7 @@ BOOLEAN_COLUMNS = {
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
+ "pushers": ["enabled"],
}
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e6dea89c6d..1c6b53aa24 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -100,6 +100,12 @@ class Codes(str, Enum):
UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"
+ # Returned for federation requests where we can't process a request as we
+ # can't ensure the sending server is in a room which is partial-stated on
+ # our side.
+ # Part of MSC3895.
+ UNABLE_DUE_TO_PARTIAL_STATE = "ORG.MATRIX.MSC3895_UNABLE_DUE_TO_PARTIAL_STATE"
+
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 702b81e636..933779c23a 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -63,7 +63,8 @@ class ExperimentalConfig(Config):
# MSC3706 (server-side support for partial state in /send_join responses)
self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
- # experimental support for faster joins over federation (msc2775, msc3706)
+ # experimental support for faster joins over federation
+ # (MSC2775, MSC3706, MSC3895)
# requires a target server with msc3706_enabled enabled.
self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
@@ -82,6 +83,8 @@ class ExperimentalConfig(Config):
# MSC3786 (Add a default push rule to ignore m.room.server_acl events)
self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
+ # MSC3771: Thread read receipts
+ self.msc3771_enabled: bool = experimental.get("msc3771_enabled", False)
# MSC3772: A push rule for mutual relations.
self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
@@ -93,3 +96,13 @@ class ExperimentalConfig(Config):
# MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)
+
+ # MSC3881: Remotely toggle push notifications for another client
+ self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
+
+ # MSC3882: Allow an existing session to sign in a new session
+ self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False)
+ self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True)
+ self.msc3882_token_timeout = self.parse_duration(
+ experimental.get("msc3882_token_timeout", "5m")
+ )
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 965f198c99..641922a1d2 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -525,13 +525,10 @@ class FederationServer(FederationBase):
async def on_room_state_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
# we grab the linearizer to protect ourselves from servers which hammer
# us. In theory we might already have the response to this query
# in the cache so we could return it without waiting for the linearizer
@@ -555,13 +552,10 @@ class FederationServer(FederationBase):
if not event_id:
raise NotImplementedError("Specify an event")
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
resp = await self._state_ids_resp_cache.wrap(
(room_id, event_id),
self._on_state_ids_request_compute,
@@ -950,6 +944,7 @@ class FederationServer(FederationBase):
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
async with self._server_linearizer.queue((origin, room_id)):
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 8e9c98db6c..961f8eb186 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -195,7 +195,9 @@ class DeviceWorkerHandler:
possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
- current_state_ids = await self._state_storage.get_current_state_ids(room_id)
+ current_state_ids = await self._state_storage.get_current_state_ids(
+ room_id, await_full_state=False
+ )
# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
@@ -234,7 +236,8 @@ class DeviceWorkerHandler:
# mapping from event_id -> state_dict
prev_state_ids = await self._state_storage.get_state_ids_for_events(
- event_ids
+ event_ids,
+ await_full_state=False,
)
# Check if we've joined the room? If so we just blindly add all the users to
@@ -688,11 +691,15 @@ class DeviceHandler(DeviceWorkerHandler):
# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
hosts = set(
- await self._storage_controllers.state.get_current_hosts_in_room(
+ await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
)
hosts.discard(self.server_name)
+ # For rooms with partial state, `hosts` is merely an
+ # approximation. When we transition to a full state room, we
+ # will have to send out device list updates to any servers we
+ # missed.
# Check if we've already sent this update to some hosts
if current_stream_id == stream_id:
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index c3ddc5d182..8249ca1ed2 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -31,7 +31,6 @@ from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.types import StateMap, get_domain_from_id
-from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -156,9 +155,33 @@ class EventAuthHandler:
Codes.UNABLE_TO_GRANT_JOIN,
)
- async def check_host_in_room(self, room_id: str, host: str) -> bool:
- with Measure(self._clock, "check_host_in_room"):
- return await self._store.is_host_joined(room_id, host)
+ async def is_host_in_room(self, room_id: str, host: str) -> bool:
+ return await self._store.is_host_joined(room_id, host)
+
+ async def assert_host_in_room(
+ self, room_id: str, host: str, allow_partial_state_rooms: bool = False
+ ) -> None:
+ """
+ Asserts that the host is in the room, or raises an AuthError.
+
+ If the room is partial-stated, we raise an AuthError with the
+ UNABLE_DUE_TO_PARTIAL_STATE error code, unless `allow_partial_state_rooms` is true.
+
+ If allow_partial_state_rooms is True and the room is partial-stated,
+ this function may return an incorrect result as we are not able to fully
+ track server membership in a room without full state.
+ """
+ if not allow_partial_state_rooms and await self._store.is_partial_state_room(
+ room_id
+ ):
+ raise AuthError(
+ 403,
+ "Unable to authorise you right now; room is partial-stated here.",
+ errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
+ )
+
+ if not await self.is_host_in_room(room_id, host):
+ raise AuthError(403, "Host not in room.")
async def check_restricted_join_rules(
self,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 377ac04f8c..73471fe041 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -232,9 +232,7 @@ class FederationHandler:
"""
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
- for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room(
- room_id
- )
+ for event_id, depth in await self.store.get_backfill_points_in_room(room_id)
]
insertion_events_to_be_backfilled: List[_BackfillPoint] = []
@@ -815,7 +813,7 @@ class FederationHandler:
)
# now check that we are *still* in the room
- is_in_room = await self._event_auth_handler.check_host_in_room(
+ is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -1161,9 +1159,7 @@ class FederationHandler:
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
# Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100)
@@ -1209,21 +1205,17 @@ class FederationHandler:
event_id, allow_none=True, allow_rejected=True
)
- if event:
- in_room = await self._event_auth_handler.check_host_in_room(
- event.room_id, origin
- )
- if not in_room:
- raise AuthError(403, "Host not in room.")
-
- events = await filter_events_for_server(
- self._storage_controllers, origin, [event]
- )
- event = events[0]
- return event
- else:
+ if not event:
return None
+ await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
+
+ events = await filter_events_for_server(
+ self._storage_controllers, origin, [event]
+ )
+ event = events[0]
+ return event
+
async def on_get_missing_events(
self,
origin: str,
@@ -1232,9 +1224,7 @@ class FederationHandler:
latest_events: List[str],
limit: int,
) -> List[EventBase]:
- in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
- if not in_room:
- raise AuthError(403, "Host not in room.")
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
# Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
@@ -1268,7 +1258,7 @@ class FederationHandler:
"state_key": target_user_id,
}
- if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
+ if await self._event_auth_handler.is_host_in_room(room_id, self.hs.hostname):
room_version_obj = await self.store.get_room_version(room_id)
builder = self.event_builder_factory.for_room_version(
room_version_obj, event_dict
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index c87925aa51..00a8860ff3 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -238,7 +238,7 @@ class FederationEventHandler:
#
# Note that if we were never in the room then we would have already
# dropped the event, since we wouldn't know the room version.
- is_in_room = await self._event_auth_handler.check_host_in_room(
+ is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self._server_name
)
if not is_in_room:
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index d2bdb9c8be..4768a34c07 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -63,6 +63,8 @@ class ReceiptsHandler:
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
+ self._msc3771_enabled = hs.config.experimental.msc3771_enabled
+
async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None:
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
@@ -70,7 +72,7 @@ class ReceiptsHandler:
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
- is_in_room = await self.event_auth_handler.check_host_in_room(
+ is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -91,13 +93,23 @@ class ReceiptsHandler:
)
continue
+ # Check if these receipts apply to a thread.
+ thread_id = None
+ data = user_values.get("data", {})
+ if self._msc3771_enabled and isinstance(data, dict):
+ thread_id = data.get("thread_id")
+ # If the thread ID is invalid, consider it missing.
+ if not isinstance(thread_id, str):
+ thread_id = None
+
receipts.append(
ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=user_values["event_ids"],
- data=user_values.get("data", {}),
+ thread_id=thread_id,
+ data=data,
)
)
@@ -114,6 +126,7 @@ class ReceiptsHandler:
receipt.receipt_type,
receipt.user_id,
receipt.event_ids,
+ receipt.thread_id,
receipt.data,
)
@@ -146,7 +159,12 @@ class ReceiptsHandler:
return True
async def received_client_receipt(
- self, room_id: str, receipt_type: str, user_id: str, event_id: str
+ self,
+ room_id: str,
+ receipt_type: str,
+ user_id: str,
+ event_id: str,
+ thread_id: Optional[str],
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
@@ -156,6 +174,7 @@ class ReceiptsHandler:
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
+ thread_id=thread_id,
data={"ts": int(self.clock.time_msec())},
)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 20ec22105a..cfcadb34db 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -997,7 +997,7 @@ class RegistrationHandler:
assert user_tuple
token_id = user_tuple.token_id
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="email",
@@ -1005,7 +1005,7 @@ class RegistrationHandler:
app_display_name="Email Notifications",
device_display_name=threepid["address"],
pushkey=threepid["address"],
- lang=None, # We don't know a user's language here
+ lang=None,
data={},
)
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index ebd445adca..8d08625237 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -609,7 +609,7 @@ class RoomSummaryHandler:
# If this is a request over federation, check if the host is in the room or
# has a user who could join the room.
elif origin:
- if await self._event_auth_handler.check_host_in_room(
+ if await self._event_auth_handler.is_host_in_room(
room_id, origin
) or await self._store.is_host_invited(room_id, origin):
return True
@@ -624,9 +624,7 @@ class RoomSummaryHandler:
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
for space_id in allowed_rooms:
- if await self._event_auth_handler.check_host_in_room(
- space_id, origin
- ):
+ if await self._event_auth_handler.is_host_in_room(space_id, origin):
return True
logger.info(
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 1e171f3f71..6bc1cbd787 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -128,6 +128,9 @@ class SsoIdentityProvider(Protocol):
@attr.s(auto_attribs=True)
class UserAttributes:
+ # NB: This struct is documented in docs/sso_mapping_providers.md so that users can
+ # populate it with data from their own mapping providers.
+
# the localpart of the mxid that the mapper has assigned to the user.
# if `None`, the mapper has not picked a userid, and the user should be prompted to
# enter one.
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index a4cd8b8f0c..f953691669 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -340,7 +340,7 @@ class TypingWriterHandler(FollowerTypingHandler):
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
- is_in_room = await self.event_auth_handler.check_host_in_room(
+ is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@@ -362,11 +362,14 @@ class TypingWriterHandler(FollowerTypingHandler):
)
return
- domains = await self._storage_controllers.state.get_current_hosts_in_room(
+ # Let's check that the origin server is in the room before accepting the typing
+ # event. We don't want to block waiting for full state, so take an
+ # approximation if needed.
+ domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
- if self.server_name in domains:
+ if user.domain in domains:
logger.info("Got typing update from %s: %r", user_id, content)
now = self.clock.time_msec()
self._member_typing_until[member] = now + FEDERATION_TIMEOUT
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index b2a50c9105..1f8227896f 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -36,6 +36,7 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
+from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor
@@ -220,7 +221,11 @@ class ProxyAgent(_AgentBase):
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
- logger.debug("Requesting %s via %s", uri, endpoint)
+ logger.debug(
+ "Requesting %s via %s",
+ redact_uri(uri.decode("ascii", errors="replace")),
+ endpoint,
+ )
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(
diff --git a/synapse/http/server.py b/synapse/http/server.py
index c34c102e5e..be551483bb 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -705,7 +705,7 @@ class _ByteProducer:
self._request = None
-def _encode_json_bytes(json_object: Any) -> bytes:
+def _encode_json_bytes(json_object: object) -> bytes:
"""
Encode an object into JSON. Returns an iterator of bytes.
"""
@@ -746,7 +746,7 @@ def respond_with_json(
return None
if canonical_json:
- encoder = encode_canonical_json
+ encoder: Callable[[object], bytes] = encode_canonical_json
else:
encoder = _encode_json_bytes
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 9287c0fb8d..59755bff6d 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -125,7 +125,7 @@ from synapse.types import (
)
from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import CachedFunction, cached
from synapse.util.frozenutils import freeze
if TYPE_CHECKING:
@@ -836,6 +836,37 @@ class ModuleApi:
self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type]
)
+ def register_cached_function(self, cached_func: CachedFunction) -> None:
+ """Register a cached function that should be invalidated across workers.
+ Invalidation local to a worker can be done directly using `cached_func.invalidate`;
+ invalidation that needs to reach other workers must instead go through `invalidate_cache`
+ on the module API.
+
+ Args:
+ cached_func: The cached function that will be registered to receive invalidation
+ locally and from other workers.
+ """
+ self._store.register_external_cached_function(
+ f"{cached_func.__module__}.{cached_func.__name__}", cached_func
+ )
+
+ async def invalidate_cache(
+ self, cached_func: CachedFunction, keys: Tuple[Any, ...]
+ ) -> None:
+ """Invalidate a cache entry of a cached function across workers. The cached function
+ needs to be registered on all workers first with `register_cached_function`.
+
+ Args:
+ cached_func: The cached function that needs an invalidation
+ keys: keys of the entry to invalidate, usually matching the arguments of the
+ cached function.
+ """
+ cached_func.invalidate(keys)
+ await self._store.send_invalidation_to_replication(
+ f"{cached_func.__module__}.{cached_func.__name__}",
+ keys,
+ )
+
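# Illustrative sketch (not part of the change above): how a module might use the two
# new methods. The class and method names are hypothetical, and this assumes `cached`
# is the decorator re-exported by `synapse.module_api` (imported above).
from synapse.module_api import ModuleApi, cached


class ExampleModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Register on every worker so invalidations sent by other workers reach
        # this cache.
        self._api.register_cached_function(self.get_ice_cream_flavour)

    @cached()
    async def get_ice_cream_flavour(self, user_id: str) -> str:
        return "vanilla"

    async def set_ice_cream_flavour(self, user_id: str, flavour: str) -> None:
        ...  # persist the new value somewhere, then invalidate on all workers:
        await self._api.invalidate_cache(self.get_ice_cream_flavour, (user_id,))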
async def complete_sso_login_async(
self,
registered_user_id: str,
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index 57c4d70466..a0c760239d 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -116,6 +116,8 @@ class PusherConfig:
last_stream_ordering: int
last_success: Optional[int]
failing_since: Optional[int]
+ enabled: bool
+ device_id: Optional[str]
def as_dict(self) -> Dict[str, Any]:
"""Information that can be retrieved about a pusher after creation."""
@@ -128,6 +130,8 @@ class PusherConfig:
"lang": self.lang,
"profile_tag": self.profile_tag,
"pushkey": self.pushkey,
+ "enabled": self.enabled,
+ "device_id": self.device_id,
}
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 1e0ef44fc7..e2648cbc93 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -94,7 +94,7 @@ class PusherPool:
return
run_as_background_process("start_pushers", self._start_pushers)
- async def add_pusher(
+ async def add_or_update_pusher(
self,
user_id: str,
access_token: Optional[int],
@@ -106,6 +106,8 @@ class PusherPool:
lang: Optional[str],
data: JsonDict,
profile_tag: str = "",
+ enabled: bool = True,
+ device_id: Optional[str] = None,
) -> Optional[Pusher]:
"""Creates a new pusher and adds it to the pool
@@ -147,9 +149,22 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
last_success=None,
failing_since=None,
+ enabled=enabled,
+ device_id=device_id,
)
)
+ # Before we actually persist the pusher, we check if the user already has one with
+ # this app ID and pushkey. If so, we want to keep the access token and device ID
+ # in place, since this could be one device modifying (e.g. enabling/disabling)
+ # another device's pusher.
+ existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
+ user_id, app_id, pushkey
+ )
+ if existing_config:
+ access_token = existing_config.access_token
+ device_id = existing_config.device_id
+
await self.store.add_pusher(
user_id=user_id,
access_token=access_token,
@@ -163,8 +178,10 @@ class PusherPool:
data=data,
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
+ enabled=enabled,
+ device_id=device_id,
)
- pusher = await self.start_pusher_by_id(app_id, pushkey, user_id)
+ pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id)
return pusher
@@ -276,10 +293,25 @@ class PusherPool:
except Exception:
logger.exception("Exception in pusher on_new_receipts")
- async def start_pusher_by_id(
+ async def _get_pusher_config_for_user_by_app_id_and_pushkey(
+ self, user_id: str, app_id: str, pushkey: str
+ ) -> Optional[PusherConfig]:
+ resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+
+ pusher_config = None
+ for r in resultlist:
+ if r.user_name == user_id:
+ pusher_config = r
+
+ return pusher_config
+
+ async def process_pusher_change_by_id(
self, app_id: str, pushkey: str, user_id: str
) -> Optional[Pusher]:
- """Look up the details for the given pusher, and start it
+ """Look up the details for the given pusher, and either start it if its
+ "enabled" flag is True, or try to stop it otherwise.
+
+ If the pusher is new and its "enabled" flag is False, the stop is a noop.
Returns:
The pusher started, if any
@@ -290,12 +322,13 @@ class PusherPool:
if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
return None
- resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
+ pusher_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
+ user_id, app_id, pushkey
+ )
- pusher_config = None
- for r in resultlist:
- if r.user_name == user_id:
- pusher_config = r
+ if pusher_config and not pusher_config.enabled:
+ self.maybe_stop_pusher(app_id, pushkey, user_id)
+ return None
pusher = None
if pusher_config:
@@ -305,7 +338,7 @@ class PusherPool:
async def _start_pushers(self) -> None:
"""Start all the pushers"""
- pushers = await self.store.get_all_pushers()
+ pushers = await self.store.get_enabled_pushers()
# Stagger starting up the pushers so we don't completely drown the
# process on start up.
@@ -363,6 +396,8 @@ class PusherPool:
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc()
+ logger.info("Starting pusher %s / %s", pusher.user_id, appid_pushkey)
+
# Check if there *may* be push to process. We do this as this check is a
# lot cheaper to do than actually fetching the exact rows we need to
# push.
@@ -382,16 +417,7 @@ class PusherPool:
return pusher
async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
- appid_pushkey = "%s:%s" % (app_id, pushkey)
-
- byuser = self.pushers.get(user_id, {})
-
- if appid_pushkey in byuser:
- logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
- pusher = byuser.pop(appid_pushkey)
- pusher.on_stop()
-
- synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
+ self.maybe_stop_pusher(app_id, pushkey, user_id)
# We can only delete pushers on master.
if self._remove_pusher_client:
@@ -402,3 +428,22 @@ class PusherPool:
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id, pushkey, user_id
)
+
+ def maybe_stop_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
+ """Stops a pusher with the given app ID and push key if one is running.
+
+ Args:
+ app_id: the pusher's app ID.
+ pushkey: the pusher's push key.
+ user_id: the user the pusher belongs to; used to look up the right pusher.
+ """
+ appid_pushkey = "%s:%s" % (app_id, pushkey)
+
+ byuser = self.pushers.get(user_id, {})
+
+ if appid_pushkey in byuser:
+ logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
+ pusher = byuser.pop(appid_pushkey)
+ pusher.on_stop()
+
+ synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index e4f2201c92..b2522f98ca 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -189,7 +189,9 @@ class ReplicationDataHandler:
if row.deleted:
self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
- await self.start_pusher(row.user_id, row.app_id, row.pushkey)
+ await self.process_pusher_change(
+ row.user_id, row.app_id, row.pushkey
+ )
elif stream_name == EventsStream.NAME:
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
@@ -334,13 +336,15 @@ class ReplicationDataHandler:
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
- async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
+ async def process_pusher_change(
+ self, user_id: str, app_id: str, pushkey: str
+ ) -> None:
if not self._notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
- await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+ await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id)
class FederationSenderHandler:
@@ -423,7 +427,8 @@ class FederationSenderHandler:
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
- receipt.data,
+ thread_id=receipt.thread_id,
+ data=receipt.data,
)
await self.federation_sender.send_read_receipt(receipt_info)
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 398bebeaa6..e01155ad59 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -361,6 +361,7 @@ class ReceiptsStream(Stream):
receipt_type: str
user_id: str
event_id: str
+ thread_id: Optional[str]
data: dict
NAME = "receipts"
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index b712215112..9a2ab99ede 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -30,6 +30,7 @@ from synapse.rest.client import (
keys,
knock,
login as v1_login,
+ login_token_request,
logout,
mutual_rooms,
notifications,
@@ -130,3 +131,4 @@ class ClientRestResource(JsonResource):
# unstable
mutual_rooms.register_servlets(hs, client_resource)
+ login_token_request.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 2ca6b2d08a..1274773d7e 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -375,7 +375,7 @@ class UserRestServletV2(RestServlet):
and self.hs.config.email.email_notif_for_new_users
and medium == "email"
):
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
access_token=None,
kind="email",
@@ -383,7 +383,7 @@ class UserRestServletV2(RestServlet):
app_display_name="Email Notifications",
device_display_name=address,
pushkey=address,
- lang=None, # We don't know a user's language here
+ lang=None,
data={},
)
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 2db2a04f95..44f622bcce 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -534,6 +534,11 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"/add_threepid/msisdn/submit_token$", releases=(), unstable=True
)
+ class PostBody(RequestBodyModel):
+ client_secret: ClientSecretStr
+ sid: StrictStr
+ token: StrictStr
+
def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
@@ -549,16 +554,14 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"instead.",
)
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["client_secret", "sid", "token"])
- assert_valid_client_secret(body["client_secret"])
+ body = parse_and_validate_json_object_from_request(request, self.PostBody)
# Proxy submit_token request to msisdn threepid delegate
response = await self.identity_handler.proxy_msisdn_submit_token(
self.config.registration.account_threepid_delegate_msisdn,
- body["client_secret"],
- body["sid"],
- body["token"],
+ body.client_secret,
+ body.sid,
+ body.token,
)
return 200, response
@@ -581,6 +584,10 @@ class ThreepidRestServlet(RestServlet):
return 200, {"threepids": threepids}
+ # NOTE(dmr): I have chosen not to use Pydantic to parse this request's body, because
+ # the endpoint is deprecated. (If you really want to, you could do this by reusing
+ # ThreePidBindRestServlet.PostBody with an `alias_generator` to handle
+ # `threePidCreds` versus `three_pid_creds`.)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py
new file mode 100644
index 0000000000..ca5c54bf17
--- /dev/null
+++ b/synapse/rest/client/login_token_request.py
@@ -0,0 +1,94 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.rest.client._base import client_patterns, interactive_auth_handler
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class LoginTokenRequestServlet(RestServlet):
+ """
+ Get a token that can be used with `m.login.token` to log in a second device.
+
+ Request:
+
+ POST /login/token HTTP/1.1
+ Content-Type: application/json
+
+ {}
+
+ Response:
+
+ HTTP/1.1 200 OK
+ {
+ "login_token": "ABDEFGH",
+ "expires_in": 3600,
+ }
+ """
+
+ PATTERNS = client_patterns("/login/token$")
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+ self.server_name = hs.config.server.server_name
+ self.macaroon_gen = hs.get_macaroon_generator()
+ self.auth_handler = hs.get_auth_handler()
+ self.token_timeout = hs.config.experimental.msc3882_token_timeout
+ self.ui_auth = hs.config.experimental.msc3882_ui_auth
+
+ @interactive_auth_handler
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ body = parse_json_object_from_request(request)
+
+ if self.ui_auth:
+ await self.auth_handler.validate_user_via_ui_auth(
+ requester,
+ request,
+ body,
+ "issue a new access token for your account",
+ can_skip_ui_auth=False, # Don't allow skipping of UI auth
+ )
+
+ login_token = self.macaroon_gen.generate_short_term_login_token(
+ user_id=requester.user.to_string(),
+ auth_provider_id="org.matrix.msc3882.login_token_request",
+ duration_in_ms=self.token_timeout,
+ )
+
+ return (
+ 200,
+ {
+ "login_token": login_token,
+ "expires_in": self.token_timeout // 1000,
+ },
+ )
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3882_enabled:
+ LoginTokenRequestServlet(hs).register(http_server)
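# Illustrative sketch (not part of the change above): a second device could redeem the
# token issued by the servlet above via the standard `m.login.token` login flow. The
# use of `requests`, the helper name, and the URLs are assumptions for illustration.
import requests


def redeem_login_token(homeserver_url: str, login_token: str) -> dict:
    """Exchange a short-term login token for an access token on a new device."""
    resp = requests.post(
        f"{homeserver_url}/_matrix/client/v3/login",
        json={"type": "m.login.token", "token": login_token},
    )
    resp.raise_for_status()
    # On success the body contains "user_id", "access_token" and "device_id".
    return resp.json()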
diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py
index 9a1f10f4be..975eef2144 100644
--- a/synapse/rest/client/pusher.py
+++ b/synapse/rest/client/pusher.py
@@ -42,6 +42,7 @@ class PushersRestServlet(RestServlet):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
+ self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -51,9 +52,16 @@ class PushersRestServlet(RestServlet):
user.to_string()
)
- filtered_pushers = [p.as_dict() for p in pushers]
+ pusher_dicts = [p.as_dict() for p in pushers]
- return 200, {"pushers": filtered_pushers}
+ for pusher in pusher_dicts:
+ if self._msc3881_enabled:
+ pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
+ pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
+ del pusher["enabled"]
+ del pusher["device_id"]
+
+ return 200, {"pushers": pusher_dicts}
class PushersSetRestServlet(RestServlet):
@@ -65,6 +73,7 @@ class PushersSetRestServlet(RestServlet):
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
self.pusher_pool = self.hs.get_pusherpool()
+ self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -103,6 +112,10 @@ class PushersSetRestServlet(RestServlet):
if "append" in content:
append = content["append"]
+ enabled = True
+ if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content:
+ enabled = content["org.matrix.msc3881.enabled"]
+
if not append:
await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
app_id=content["app_id"],
@@ -111,7 +124,7 @@ class PushersSetRestServlet(RestServlet):
)
try:
- await self.pusher_pool.add_pusher(
+ await self.pusher_pool.add_or_update_pusher(
user_id=user.to_string(),
access_token=requester.access_token_id,
kind=content["kind"],
@@ -122,6 +135,8 @@ class PushersSetRestServlet(RestServlet):
lang=content["lang"],
data=content["data"],
profile_tag=content.get("profile_tag", ""),
+ enabled=enabled,
+ device_id=requester.device_id,
)
except PusherConfigException as pce:
raise SynapseError(
diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py
index 5e53096539..852838515c 100644
--- a/synapse/rest/client/read_marker.py
+++ b/synapse/rest/client/read_marker.py
@@ -83,6 +83,8 @@ class ReadMarkerRestServlet(RestServlet):
receipt_type,
user_id=requester.user.to_string(),
event_id=event_id,
+ # Setting the thread ID is not possible with the /read_markers endpoint.
+ thread_id=None,
)
return 200, {}
diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index 5b7fad7402..f3ff156abe 100644
--- a/synapse/rest/client/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -49,6 +49,7 @@ class ReceiptRestServlet(RestServlet):
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.FULLY_READ,
}
+ self._msc3771_enabled = hs.config.experimental.msc3771_enabled
async def on_POST(
self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str
@@ -61,7 +62,17 @@ class ReceiptRestServlet(RestServlet):
f"Receipt type must be {', '.join(self._known_receipt_types)}",
)
- parse_json_object_from_request(request, allow_empty_body=False)
+ body = parse_json_object_from_request(request)
+
+ # Pull the thread ID, if one exists.
+ thread_id = None
+ if self._msc3771_enabled:
+ if "thread_id" in body:
+ thread_id = body.get("thread_id")
+ if not thread_id or not isinstance(thread_id, str):
+ raise SynapseError(
+ 400, "thread_id field must be a non-empty string"
+ )
await self.presence_handler.bump_presence_active_time(requester.user)
@@ -77,6 +88,7 @@ class ReceiptRestServlet(RestServlet):
receipt_type,
user_id=requester.user.to_string(),
event_id=event_id,
+ thread_id=thread_id,
)
return 200, {}
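# Illustrative sketch (not part of the change above): with MSC3771 enabled, a client
# can scope a read receipt to a thread by sending a body like the one below to
# POST /rooms/{roomId}/receipt/{receiptType}/{eventId}. The event ID is a made-up
# example; omitting the field keeps the unthreaded behaviour, and an empty or
# non-string value is rejected with a 400 by the check above.
example_threaded_receipt_body = {
    "thread_id": "$thread_root_event:example.org",
}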
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index c516cda95d..c95b0d6f19 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -103,8 +103,14 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3030": self.config.experimental.msc3030_enabled,
# Adds support for thread relations, per MSC3440.
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
+ # Support for thread read receipts.
+ "org.matrix.msc3771": self.config.experimental.msc3771_enabled,
# Allows moderators to fetch redacted event content as described in MSC2815
"fi.mau.msc2815": self.config.experimental.msc2815_enabled,
+ # Adds support for login token requests as per MSC3882
+ "org.matrix.msc3882": self.config.experimental.msc3882_enabled,
+ # Adds support for remotely enabling/disabling pushers, as per MSC3881
+ "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
},
},
)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index e30f9c76d4..313e8aca7d 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -15,12 +15,13 @@
# limitations under the License.
import logging
from abc import ABCMeta
-from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union
from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.types import get_domain_from_id
from synapse.util import json_decoder
+from synapse.util.caches.descriptors import CachedFunction
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -47,6 +48,8 @@ class SQLBaseStore(metaclass=ABCMeta):
self.database_engine = database.engine
self.db_pool = database
+ self.external_cached_functions: Dict[str, CachedFunction] = {}
+
def process_replication_rows(
self,
stream_name: str,
@@ -88,6 +91,9 @@ class SQLBaseStore(metaclass=ABCMeta):
self._attempt_to_invalidate_cache(
"get_user_in_room_with_profile", (room_id, user_id)
)
+ self._attempt_to_invalidate_cache(
+ "get_rooms_for_user_with_stream_ordering", (user_id,)
+ )
# Purge other caches based on room state.
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
@@ -95,7 +101,7 @@ class SQLBaseStore(metaclass=ABCMeta):
def _attempt_to_invalidate_cache(
self, cache_name: str, key: Optional[Collection[Any]]
- ) -> None:
+ ) -> bool:
"""Attempts to invalidate the cache of the given name, ignoring if the
cache doesn't exist. Mainly used for invalidating caches on workers,
where they may not have the cache.
@@ -113,9 +119,12 @@ class SQLBaseStore(metaclass=ABCMeta):
try:
cache = getattr(self, cache_name)
except AttributeError:
- # We probably haven't pulled in the cache in this worker,
- # which is fine.
- return
+ # Check if an externally defined module cache has been registered
+ cache = self.external_cached_functions.get(cache_name)
+ if not cache:
+ # We probably haven't pulled in the cache in this worker,
+ # which is fine.
+ return False
if key is None:
cache.invalidate_all()
@@ -125,6 +134,13 @@ class SQLBaseStore(metaclass=ABCMeta):
invalidate_method = getattr(cache, "invalidate_local", cache.invalidate)
invalidate_method(tuple(key))
+ return True
+
+ def register_external_cached_function(
+ self, cache_name: str, func: CachedFunction
+ ) -> None:
+ self.external_cached_functions[cache_name] = func
+
def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any:
"""
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 19d9d315f6..b4f055fb60 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -23,6 +23,7 @@ from typing import (
List,
Mapping,
Optional,
+ Sequence,
Tuple,
)
@@ -406,6 +407,7 @@ class StateStorageController:
self,
room_id: str,
state_filter: Optional[StateFilter] = None,
+ await_full_state: bool = True,
on_invalidate: Optional[Callable[[], None]] = None,
) -> StateMap[str]:
"""Get the current state event ids for a room based on the
@@ -418,13 +420,17 @@ class StateStorageController:
room_id: The room to get the state IDs of. state_filter: The state
filter used to fetch state from the
database.
+ await_full_state: if true, will block if we do not yet have complete
+ state for the room.
on_invalidate: Callback for when the `get_current_state_ids` cache
for the room gets invalidated.
Returns:
The current state of the room.
"""
- if not state_filter or state_filter.must_await_full_state(self._is_mine_id):
+ if await_full_state and (
+ not state_filter or state_filter.must_await_full_state(self._is_mine_id)
+ ):
await self._partial_state_room_tracker.await_full_state(room_id)
if state_filter and not state_filter.is_full():
@@ -524,12 +530,53 @@ class StateStorageController:
return state_map.get(key)
async def get_current_hosts_in_room(self, room_id: str) -> List[str]:
- """Get current hosts in room based on current state."""
+ """Get current hosts in room based on current state.
+
+ Blocks until we have full state for the given room. This only happens for rooms
+ with partial state.
+
+ Returns:
+ A list of hosts in the room, sorted by longest in the room first (i.e. by
+ the join event with the lowest depth first).
+ """
await self._partial_state_room_tracker.await_full_state(room_id)
return await self.stores.main.get_current_hosts_in_room(room_id)
+ async def get_current_hosts_in_room_or_partial_state_approximation(
+ self, room_id: str
+ ) -> Sequence[str]:
+ """Get approximation of current hosts in room based on current state.
+
+ For rooms with full state, this is equivalent to `get_current_hosts_in_room`,
+ with the same order of results.
+
+ For rooms with partial state, no blocking occurs. Instead, the list of hosts
+ in the room at the time of joining is combined with the list of hosts which
+ joined the room afterwards. The returned list may include hosts that are not
+ actually in the room and exclude hosts that are in the room, since we may
+ calculate state incorrectly during the partial state phase. The order of results
+ is arbitrary for rooms with partial state.
+ """
+ # We have to read this list first to mitigate races with un-partial stating.
+ # This will be empty for rooms with full state.
+ hosts_at_join = await self.stores.main.get_partial_state_servers_at_join(
+ room_id
+ )
+
+ hosts_from_state = await self.stores.main.get_current_hosts_in_room(room_id)
+ hosts_from_state_set = set(hosts_from_state)
+
+ # First take the list of hosts based on the current state.
+ # For rooms with partial state, this will be missing most hosts.
+ hosts = list(hosts_from_state)
+ # Then add in the list of hosts in the room at the time we joined.
+ # This will be an empty list for rooms with full state.
+ hosts.extend(host for host in hosts_at_join if host not in hosts_from_state_set)
+
+ return hosts
+
async def get_users_in_room_with_profiles(
self, room_id: str
) -> Dict[str, ProfileInfo]:
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 4a75fb4f3e..a00f8dcd21 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -95,6 +95,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
"local_media_repository_thumbnails": "local_media_repository_thumbnails_method_idx",
"remote_media_cache_thumbnails": "remote_media_repository_thumbnails_method_idx",
"event_push_summary": "event_push_summary_unique_index",
+ "receipts_linearized": "receipts_linearized_unique_index",
+ "receipts_graph": "receipts_graph_unique_index",
}
@@ -391,6 +393,14 @@ class LoggingTransaction:
def executemany(self, sql: str, *args: Any) -> None:
self._do_execute(self.txn.executemany, sql, *args)
+ def executescript(self, sql: str) -> None:
+ if isinstance(self.database_engine, Sqlite3Engine):
+ self._do_execute(self.txn.executescript, sql) # type: ignore[attr-defined]
+ else:
+ raise NotImplementedError(
+ f"executescript only exists for sqlite driver, not {type(self.database_engine)}"
+ )
+
def _make_sql_one_line(self, sql: str) -> str:
"Strip newlines out of SQL so that the loggers in the DB are on one line"
return " ".join(line.strip() for line in sql.splitlines() if line.strip())
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 12e9a42382..db6ce83a2b 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -33,7 +33,7 @@ from synapse.storage.database import (
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator
-from synapse.util.caches.descriptors import _CachedFunction
+from synapse.util.caches.descriptors import CachedFunction
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
@@ -223,15 +223,16 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
# process triggering the invalidation is responsible for clearing any external
# cached objects.
self._invalidate_local_get_event_cache(event_id)
- self.have_seen_event.invalidate((room_id, event_id))
- self.get_latest_event_ids_in_room.invalidate((room_id,))
-
- self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,))
+ self._attempt_to_invalidate_cache("have_seen_event", (room_id, event_id))
+ self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
+ self._attempt_to_invalidate_cache(
+ "get_unread_event_push_actions_by_room_for_user", (room_id,)
+ )
# The `_get_membership_from_event_id` is immutable, except for the
# case where we look up an event *before* persisting it.
- self._get_membership_from_event_id.invalidate((event_id,))
+ self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))
if not backfilled:
self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
@@ -240,19 +241,26 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._invalidate_local_get_event_cache(redacts)
# Caches which might leak edits must be invalidated for the event being
# redacted.
- self.get_relations_for_event.invalidate((redacts,))
- self.get_applicable_edit.invalidate((redacts,))
+ self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,))
+ self._attempt_to_invalidate_cache("get_applicable_edit", (redacts,))
if etype == EventTypes.Member:
self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
- self.get_invited_rooms_for_local_user.invalidate((state_key,))
+ self._attempt_to_invalidate_cache(
+ "get_invited_rooms_for_local_user", (state_key,)
+ )
if relates_to:
- self.get_relations_for_event.invalidate((relates_to,))
- self.get_aggregation_groups_for_event.invalidate((relates_to,))
- self.get_applicable_edit.invalidate((relates_to,))
- self.get_thread_summary.invalidate((relates_to,))
- self.get_thread_participated.invalidate((relates_to,))
+ self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
+ self._attempt_to_invalidate_cache(
+ "get_aggregation_groups_for_event", (relates_to,)
+ )
+ self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,))
+ self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,))
+ self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,))
+ self._attempt_to_invalidate_cache(
+ "get_mutual_event_relations_for_rel_type", (relates_to,)
+ )
async def invalidate_cache_and_stream(
self, cache_name: str, keys: Tuple[Any, ...]
@@ -269,9 +277,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
return
cache_func.invalidate(keys)
- await self.db_pool.runInteraction(
- "invalidate_cache_and_stream",
- self._send_invalidation_to_replication,
+ await self.send_invalidation_to_replication(
cache_func.__name__,
keys,
)
@@ -279,7 +285,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
def _invalidate_cache_and_stream(
self,
txn: LoggingTransaction,
- cache_func: _CachedFunction,
+ cache_func: CachedFunction,
keys: Tuple[Any, ...],
) -> None:
"""Invalidates the cache and adds it to the cache stream so slaves
@@ -293,7 +299,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
def _invalidate_all_cache_and_stream(
- self, txn: LoggingTransaction, cache_func: _CachedFunction
+ self, txn: LoggingTransaction, cache_func: CachedFunction
) -> None:
"""Invalidates the entire cache and adds it to the cache stream so slaves
will know to invalidate their caches.
@@ -334,6 +340,16 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
txn, CURRENT_STATE_CACHE_NAME, [room_id]
)
+ async def send_invalidation_to_replication(
+ self, cache_name: str, keys: Optional[Collection[Any]]
+ ) -> None:
+ await self.db_pool.runInteraction(
+ "send_invalidation_to_replication",
+ self._send_invalidation_to_replication,
+ cache_name,
+ keys,
+ )
+
def _send_invalidation_to_replication(
self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]]
) -> None:
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 0669d54822..0261ff4ad6 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import datetime
import itertools
import logging
from queue import Empty, PriorityQueue
@@ -43,7 +44,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -72,6 +73,13 @@ pdus_pruned_from_federation_queue = Counter(
logger = logging.getLogger(__name__)
+BACKFILL_EVENT_BACKOFF_UPPER_BOUND_SECONDS: int = int(
+ datetime.timedelta(days=7).total_seconds()
+)
+BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_SECONDS: int = int(
+ datetime.timedelta(hours=1).total_seconds()
+)
+
# All the info we need while iterating the DAG while backfilling
@attr.s(frozen=True, slots=True, auto_attribs=True)
@@ -715,96 +723,189 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
@trace
@tag_args
- async def get_oldest_event_ids_with_depth_in_room(
- self, room_id: str
+ async def get_backfill_points_in_room(
+ self,
+ room_id: str,
) -> List[Tuple[str, int]]:
- """Gets the oldest events(backwards extremities) in the room along with the
- aproximate depth.
-
- We use this function so that we can compare and see if someones current
- depth at their current scrollback is within pagination range of the
- event extremeties. If the current depth is close to the depth of given
- oldest event, we can trigger a backfill.
+ """
+ Gets the oldest events (backwards extremities) in the room along with the
+ approximate depth. Sorted by depth, highest to lowest (descending).
Args:
room_id: Room where we want to find the oldest events
Returns:
- List of (event_id, depth) tuples
+ List of (event_id, depth) tuples. Sorted by depth, highest to lowest
+ (descending)
"""
- def get_oldest_event_ids_with_depth_in_room_txn(
+ def get_backfill_points_in_room_txn(
txn: LoggingTransaction, room_id: str
) -> List[Tuple[str, int]]:
- # Assemble a dictionary with event_id -> depth for the oldest events
+ # Assemble a tuple lookup of event_id -> depth for the oldest events
# we know of in the room. Backwards extremeties are the oldest
# events we know of in the room but we only know of them because
- # some other event referenced them by prev_event and aren't peristed
- # in our database yet (meaning we don't know their depth
- # specifically). So we need to look for the aproximate depth from
+ # some other event referenced them by prev_event and aren't
+ # persisted in our database yet (meaning we don't know their depth
+ # specifically). So we need to look for the approximate depth from
# the events connected to the current backwards extremeties.
sql = """
- SELECT b.event_id, MAX(e.depth) FROM events as e
+ SELECT backward_extrem.event_id, event.depth FROM events AS event
/**
* Get the edge connections from the event_edges table
* so we can see whether this event's prev_events points
* to a backward extremity in the next join.
*/
- INNER JOIN event_edges as g
- ON g.event_id = e.event_id
+ INNER JOIN event_edges AS edge
+ ON edge.event_id = event.event_id
/**
* We find the "oldest" events in the room by looking for
* events connected to backwards extremeties (oldest events
* in the room that we know of so far).
*/
- INNER JOIN event_backward_extremities as b
- ON g.prev_event_id = b.event_id
- WHERE b.room_id = ? AND g.is_state is ?
- GROUP BY b.event_id
+ INNER JOIN event_backward_extremities AS backward_extrem
+ ON edge.prev_event_id = backward_extrem.event_id
+ /**
+ * We use this info to make sure we don't retry a backfill point
+ * if we've already attempted to backfill from it recently.
+ */
+ LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info
+ ON
+ failed_backfill_attempt_info.room_id = backward_extrem.room_id
+ AND failed_backfill_attempt_info.event_id = backward_extrem.event_id
+ WHERE
+ backward_extrem.room_id = ?
+ /* We only care about non-state edges because we used to use
+ * `event_edges` for two different sorts of "edges" (the current
+ * event DAG, but also a link to the previous state, for state
+ * events). These legacy state event edges can be distinguished by
+ * `is_state` and are removed from the codebase and schema but
+ * because the schema change is in a background update, it's not
+ * necessarily safe to assume that it will have been completed.
+ */
+ AND edge.is_state is ? /* False */
+ /**
+ * Exponential back-off (up to the upper bound) so we don't retry the
+ * same backfill point over and over. e.g. 2hr, 4hr, 8hr, 16hr, etc.
+ *
+ * We use `1 << n` as a power of 2 equivalent for compatibility
+ * with older SQLites. The left shift equivalent only works with
+ * powers of 2 because left shift is a binary operation (base-2).
+ * Otherwise, we would use `power(2, n)` or the power operator, `2^n`.
+ */
+ AND (
+ failed_backfill_attempt_info.event_id IS NULL
+ OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + /*least*/%s((1 << failed_backfill_attempt_info.num_attempts) * ? /* step */, ? /* upper bound */)
+ )
+ /**
+ * Sort from highest to the lowest depth. Then tie-break on
+ * alphabetical order of the event_ids so we get a consistent
+ * ordering which is nice when asserting things in tests.
+ */
+ ORDER BY event.depth DESC, backward_extrem.event_id DESC
"""
- txn.execute(sql, (room_id, False))
+ if isinstance(self.database_engine, PostgresEngine):
+ least_function = "least"
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ least_function = "min"
+ else:
+ raise RuntimeError("Unknown database engine")
+
+ txn.execute(
+ sql % (least_function,),
+ (
+ room_id,
+ False,
+ self._clock.time_msec(),
+ 1000 * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_SECONDS,
+ 1000 * BACKFILL_EVENT_BACKOFF_UPPER_BOUND_SECONDS,
+ ),
+ )
return cast(List[Tuple[str, int]], txn.fetchall())
return await self.db_pool.runInteraction(
- "get_oldest_event_ids_with_depth_in_room",
- get_oldest_event_ids_with_depth_in_room_txn,
+ "get_backfill_points_in_room",
+ get_backfill_points_in_room_txn,
room_id,
)
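# Illustrative sketch (not part of the change above): the back-off clause in the SQL
# is equivalent to the Python below, using the constants defined at the top of this
# file; the helper name is hypothetical.
def _backfill_backoff_ms(num_attempts: int) -> int:
    """How long to wait after `last_attempt_ts` before retrying a backfill point."""
    step_ms = 1000 * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_SECONDS
    upper_bound_ms = 1000 * BACKFILL_EVENT_BACKOFF_UPPER_BOUND_SECONDS
    # With a 1 hour step this gives 2h, 4h, 8h, 16h, ... capped at 7 days.
    return min((1 << num_attempts) * step_ms, upper_bound_ms)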
@trace
async def get_insertion_event_backward_extremities_in_room(
- self, room_id: str
+ self,
+ room_id: str,
) -> List[Tuple[str, int]]:
- """Get the insertion events we know about that we haven't backfilled yet.
-
- We use this function so that we can compare and see if someones current
- depth at their current scrollback is within pagination range of the
- insertion event. If the current depth is close to the depth of given
- insertion event, we can trigger a backfill.
+ """
+ Get the insertion events we know about that we haven't backfilled yet
+ along with the approximate depth. Sorted by depth, highest to lowest
+ (descending).
Args:
room_id: Room where we want to find the oldest events
Returns:
- List of (event_id, depth) tuples
+ List of (event_id, depth) tuples. Sorted by depth, highest to lowest
+ (descending)
"""
def get_insertion_event_backward_extremities_in_room_txn(
txn: LoggingTransaction, room_id: str
) -> List[Tuple[str, int]]:
sql = """
- SELECT b.event_id, MAX(e.depth) FROM insertion_events as i
+ SELECT
+ insertion_event_extremity.event_id, event.depth
/* We only want insertion events that are also marked as backwards extremities */
- INNER JOIN insertion_event_extremities as b USING (event_id)
+ FROM insertion_event_extremities AS insertion_event_extremity
/* Get the depth of the insertion event from the events table */
- INNER JOIN events AS e USING (event_id)
- WHERE b.room_id = ?
- GROUP BY b.event_id
+ INNER JOIN events AS event USING (event_id)
+ /**
+ * We use this info to make sure we don't retry a backfill point
+ * if we've already attempted to backfill from it recently.
+ */
+ LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info
+ ON
+ failed_backfill_attempt_info.room_id = insertion_event_extremity.room_id
+ AND failed_backfill_attempt_info.event_id = insertion_event_extremity.event_id
+ WHERE
+ insertion_event_extremity.room_id = ?
+ /**
+ * Exponential back-off (up to the upper bound) so we don't retry the
+ * same backfill point over and over. e.g. 2hr, 4hr, 8hr, 16hr, etc.
+ *
+ * We use `1 << n` as a power of 2 equivalent for compatibility
+ * with older SQLites. The left shift equivalent only works with
+ * powers of 2 because left shift is a binary operation (base-2).
+ * Otherwise, we would use `power(2, n)` or the power operator, `2^n`.
+ */
+ AND (
+ failed_backfill_attempt_info.event_id IS NULL
+ OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + /*least*/%s((1 << failed_backfill_attempt_info.num_attempts) * ? /* step */, ? /* upper bound */)
+ )
+ /**
+ * Sort from highest to the lowest depth. Then tie-break on
+ * alphabetical order of the event_ids so we get a consistent
+ * ordering which is nice when asserting things in tests.
+ */
+ ORDER BY event.depth DESC, insertion_event_extremity.event_id DESC
"""
- txn.execute(sql, (room_id,))
+ if isinstance(self.database_engine, PostgresEngine):
+ least_function = "least"
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ least_function = "min"
+ else:
+ raise RuntimeError("Unknown database engine")
+
+ txn.execute(
+ sql % (least_function,),
+ (
+ room_id,
+ self._clock.time_msec(),
+ 1000 * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_SECONDS,
+ 1000 * BACKFILL_EVENT_BACKOFF_UPPER_BOUND_SECONDS,
+ ),
+ )
return cast(List[Tuple[str, int]], txn.fetchall())
return await self.db_pool.runInteraction(
@@ -1539,7 +1640,12 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
self,
room_id: str,
) -> Optional[Tuple[str, str]]:
- """Get the next event ID in the staging area for the given room."""
+ """
+ Get the next event ID in the staging area for the given room.
+
+ Returns:
+ Tuple of the `origin` and `event_id`
+ """
def _get_next_staged_event_id_for_room_txn(
txn: LoggingTransaction,
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 6b8668d2dc..f4cdc2e399 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -559,7 +559,18 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
def _get_receipts_by_room_txn(
self, txn: LoggingTransaction, user_id: str
- ) -> List[Tuple[str, int]]:
+ ) -> Dict[str, int]:
+ """
+ Generate a map of room ID to the latest stream ordering that has been
+ read by the given user.
+
+ Args:
+ txn:
+ user_id: The user to fetch receipts for.
+
+ Returns:
+ A map of room ID to stream ordering for all rooms the user has a receipt in.
+ """
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
@@ -580,7 +591,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
args.extend((user_id,))
txn.execute(sql, args)
- return cast(List[Tuple[str, int]], txn.fetchall())
+ return {
+ room_id: latest_stream_ordering
+ for room_id, latest_stream_ordering in txn.fetchall()
+ }
async def get_unread_push_actions_for_user_in_range_for_http(
self,
@@ -605,12 +619,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will have between 0~limit entries.
"""
- receipts_by_room = dict(
- await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_http_receipts",
- self._get_receipts_by_room_txn,
- user_id=user_id,
- ),
+ receipts_by_room = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_http_receipts",
+ self._get_receipts_by_room_txn,
+ user_id=user_id,
)
def get_push_actions_txn(
@@ -679,12 +691,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will have between 0~limit entries.
"""
- receipts_by_room = dict(
- await self.db_pool.runInteraction(
- "get_unread_push_actions_for_user_in_range_email_receipts",
- self._get_receipts_by_room_txn,
- user_id=user_id,
- ),
+ receipts_by_room = await self.db_pool.runInteraction(
+ "get_unread_push_actions_for_user_in_range_email_receipts",
+ self._get_receipts_by_room_txn,
+ user_id=user_id,
)
def get_push_actions_txn(
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 5932668f2f..0b86a53085 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -35,7 +35,7 @@ import attr
from prometheus_client import Counter
import synapse.metrics
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import EventContentFields, EventTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, relation_from_event
@@ -410,6 +410,31 @@ class PersistEventsStore:
assert min_stream_order
assert max_stream_order
+ # Once the txn completes, invalidate all of the relevant caches. Note that we do this
+ # up here because it captures all the events_and_contexts before any are removed.
+ for event, _ in events_and_contexts:
+ self.store.invalidate_get_event_cache_after_txn(txn, event.event_id)
+ if event.redacts:
+ self.store.invalidate_get_event_cache_after_txn(txn, event.redacts)
+
+ relates_to = None
+ relation = relation_from_event(event)
+ if relation:
+ relates_to = relation.parent_id
+
+ assert event.internal_metadata.stream_ordering is not None
+ txn.call_after(
+ self.store._invalidate_caches_for_event,
+ event.internal_metadata.stream_ordering,
+ event.event_id,
+ event.room_id,
+ event.type,
+ getattr(event, "state_key", None),
+ event.redacts,
+ relates_to,
+ backfilled=False,
+ )
+
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremities,
@@ -459,6 +484,7 @@ class PersistEventsStore:
# We call this last as it assumes we've inserted the events into
# room_memberships, where applicable.
+ # NB: This function invalidates all state related caches
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
def _persist_event_auth_chain_txn(
@@ -1172,13 +1198,6 @@ class PersistEventsStore:
)
# Invalidate the various caches
-
- for member in members_changed:
- txn.call_after(
- self.store.get_rooms_for_user_with_stream_ordering.invalidate,
- (member,),
- )
-
self.store._invalidate_state_caches_and_stream(
txn, room_id, members_changed
)
@@ -1222,9 +1241,6 @@ class PersistEventsStore:
self.db_pool.simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
- txn.call_after(
- self.store.get_latest_event_ids_in_room.invalidate, (room_id,)
- )
self.db_pool.simple_insert_many_txn(
txn,
@@ -1294,8 +1310,6 @@ class PersistEventsStore:
"""
depth_updates: Dict[str, int] = {}
for event, context in events_and_contexts:
- # Remove the any existing cache entries for the event_ids
- self.store.invalidate_get_event_cache_after_txn(txn, event.event_id)
# Then update the `stream_ordering` position to mark the latest
# event as the front of the room. This should not be done for
# backfilled events because backfilled events have negative
@@ -1697,16 +1711,7 @@ class PersistEventsStore:
txn.async_call_after(prefill)
def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
- """Invalidate the caches for the redacted event.
-
- Note that these caches are also cleared as part of event replication in
- _invalidate_caches_for_event.
- """
assert event.redacts is not None
- self.store.invalidate_get_event_cache_after_txn(txn, event.redacts)
- txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
- txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))
-
self.db_pool.simple_upsert_txn(
txn,
table="redactions",
@@ -1807,34 +1812,6 @@ class PersistEventsStore:
for event in events:
assert event.internal_metadata.stream_ordering is not None
- txn.call_after(
- self.store._membership_stream_cache.entity_has_changed,
- event.state_key,
- event.internal_metadata.stream_ordering,
- )
- txn.call_after(
- self.store.get_invited_rooms_for_local_user.invalidate,
- (event.state_key,),
- )
- txn.call_after(
- self.store.get_local_users_in_room.invalidate,
- (event.room_id,),
- )
- txn.call_after(
- self.store.get_number_joined_users_in_room.invalidate,
- (event.room_id,),
- )
- txn.call_after(
- self.store.get_user_in_room_with_profile.invalidate,
- (event.room_id, event.state_key),
- )
-
- # The `_get_membership_from_event_id` is immutable, except for the
- # case where we look up an event *before* persisting it.
- txn.call_after(
- self.store._get_membership_from_event_id.invalidate,
- (event.event_id,),
- )
# We update the local_current_membership table only if the event is
# "current", i.e., its something that has just happened.
@@ -1883,35 +1860,6 @@ class PersistEventsStore:
},
)
- txn.call_after(
- self.store.get_relations_for_event.invalidate, (relation.parent_id,)
- )
- txn.call_after(
- self.store.get_aggregation_groups_for_event.invalidate,
- (relation.parent_id,),
- )
- txn.call_after(
- self.store.get_mutual_event_relations_for_rel_type.invalidate,
- (relation.parent_id,),
- )
-
- if relation.rel_type == RelationTypes.REPLACE:
- txn.call_after(
- self.store.get_applicable_edit.invalidate, (relation.parent_id,)
- )
-
- if relation.rel_type == RelationTypes.THREAD:
- txn.call_after(
- self.store.get_thread_summary.invalidate, (relation.parent_id,)
- )
- # It should be safe to only invalidate the cache if the user has not
- # previously participated in the thread, but that's difficult (and
- # potentially error-prone) so it is always invalidated.
- txn.call_after(
- self.store.get_thread_participated.invalidate,
- (relation.parent_id, event.sender),
- )
-
def _handle_insertion_event(
self, txn: LoggingTransaction, event: EventBase
) -> None:
@@ -2213,28 +2161,6 @@ class PersistEventsStore:
),
)
- room_to_event_ids: Dict[str, List[str]] = {}
- for e in non_outlier_events:
- room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
-
- for room_id, event_ids in room_to_event_ids.items():
- rows = self.db_pool.simple_select_many_txn(
- txn,
- table="event_push_actions_staging",
- column="event_id",
- iterable=event_ids,
- keyvalues={},
- retcols=("user_id",),
- )
-
- user_ids = {row["user_id"] for row in rows}
-
- for user_id in user_ids:
- txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
- (room_id, user_id),
- )
-
# Now we delete the staging area for *all* events that were being
# persisted.
txn.execute_batch(
@@ -2249,11 +2175,6 @@ class PersistEventsStore:
def _remove_push_actions_for_event_id_txn(
self, txn: LoggingTransaction, room_id: str, event_id: str
) -> None:
- # Sad that we have to blow away the cache for the whole room here
- txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
- (room_id,),
- )
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id),
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index bd0cfa7f32..01206950a9 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -89,6 +89,11 @@ class PusherWorkerStore(SQLBaseStore):
)
continue
+ # If we're using SQLite, then boolean values are integers. This is
+ # troublesome since some code using the return value of this method might
+ # expect it to be a boolean, or will expose it to clients (in responses).
+ r["enabled"] = bool(r["enabled"])
+
yield PusherConfig(**r)
async def get_pushers_by_app_id_and_pushkey(
@@ -100,38 +105,52 @@ class PusherWorkerStore(SQLBaseStore):
return await self.get_pushers_by({"user_name": user_id})
async def get_pushers_by(self, keyvalues: Dict[str, Any]) -> Iterator[PusherConfig]:
- ret = await self.db_pool.simple_select_list(
- "pushers",
- keyvalues,
- [
- "id",
- "user_name",
- "access_token",
- "profile_tag",
- "kind",
- "app_id",
- "app_display_name",
- "device_display_name",
- "pushkey",
- "ts",
- "lang",
- "data",
- "last_stream_ordering",
- "last_success",
- "failing_since",
- ],
+ """Retrieve pushers that match the given criteria.
+
+ Args:
+ keyvalues: A {column: value} dictionary.
+
+ Returns:
+ The pushers for which the given columns have the given values.
+ """
+
+ def get_pushers_by_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:
+ # We could technically use simple_select_list here, but we need to call
+ # COALESCE on the 'enabled' column. While it is technically possible to give
+ # simple_select_list the whole `COALESCE(...) AS ...` as a column name, it
+ # feels a bit hacky, so it's probably better to just inline the query.
+ sql = """
+ SELECT
+ id, user_name, access_token, profile_tag, kind, app_id,
+ app_display_name, device_display_name, pushkey, ts, lang, data,
+ last_stream_ordering, last_success, failing_since,
+ COALESCE(enabled, TRUE) AS enabled, device_id
+ FROM pushers
+ """
+
+ sql += "WHERE %s" % (" AND ".join("%s = ?" % (k,) for k in keyvalues),)
+
+ txn.execute(sql, list(keyvalues.values()))
+
+ return self.db_pool.cursor_to_dict(txn)
+
+ ret = await self.db_pool.runInteraction(
desc="get_pushers_by",
+ func=get_pushers_by_txn,
)
+
return self._decode_pushers_rows(ret)
- async def get_all_pushers(self) -> Iterator[PusherConfig]:
- def get_pushers(txn: LoggingTransaction) -> Iterator[PusherConfig]:
- txn.execute("SELECT * FROM pushers")
+ async def get_enabled_pushers(self) -> Iterator[PusherConfig]:
+ def get_enabled_pushers_txn(txn: LoggingTransaction) -> Iterator[PusherConfig]:
+ txn.execute("SELECT * FROM pushers WHERE COALESCE(enabled, TRUE)")
rows = self.db_pool.cursor_to_dict(txn)
return self._decode_pushers_rows(rows)
- return await self.db_pool.runInteraction("get_all_pushers", get_pushers)
+ return await self.db_pool.runInteraction(
+ "get_enabled_pushers", get_enabled_pushers_txn
+ )
async def get_all_updated_pushers_rows(
self, instance_name: str, last_id: int, current_id: int, limit: int
@@ -458,7 +477,74 @@ class PusherWorkerStore(SQLBaseStore):
return number_deleted
-class PusherStore(PusherWorkerStore):
+class PusherBackgroundUpdatesStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ self.db_pool.updates.register_background_update_handler(
+ "set_device_id_for_pushers", self._set_device_id_for_pushers
+ )
+
+ async def _set_device_id_for_pushers(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Background update to populate the device_id column of the pushers table."""
+ last_pusher_id = progress.get("pusher_id", 0)
+
+ def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int:
+ txn.execute(
+ """
+ SELECT p.id, at.device_id
+ FROM pushers AS p
+ INNER JOIN access_tokens AS at
+ ON p.access_token = at.id
+ WHERE
+ p.access_token IS NOT NULL
+ AND at.device_id IS NOT NULL
+ AND p.id > ?
+ ORDER BY p.id
+ LIMIT ?
+ """,
+ (last_pusher_id, batch_size),
+ )
+
+ rows = self.db_pool.cursor_to_dict(txn)
+ if len(rows) == 0:
+ return 0
+
+ self.db_pool.simple_update_many_txn(
+ txn=txn,
+ table="pushers",
+ key_names=("id",),
+ key_values=[(row["id"],) for row in rows],
+ value_names=("device_id",),
+ value_values=[(row["device_id"],) for row in rows],
+ )
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]}
+ )
+
+ return len(rows)
+
+ nb_processed = await self.db_pool.runInteraction(
+ "set_device_id_for_pushers", set_device_id_for_pushers_txn
+ )
+
+ if nb_processed < batch_size:
+ await self.db_pool.updates._end_background_update(
+ "set_device_id_for_pushers"
+ )
+
+ return nb_processed
+
+
+class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
def get_pushers_stream_token(self) -> int:
return self._pushers_id_gen.get_current_token()
@@ -476,6 +562,8 @@ class PusherStore(PusherWorkerStore):
data: Optional[JsonDict],
last_stream_ordering: int,
profile_tag: str = "",
+ enabled: bool = True,
+ device_id: Optional[str] = None,
) -> None:
async with self._pushers_id_gen.get_next() as stream_id:
# no need to lock because `pushers` has a unique key on
@@ -494,6 +582,8 @@ class PusherStore(PusherWorkerStore):
"last_stream_ordering": last_stream_ordering,
"profile_tag": profile_tag,
"id": stream_id,
+ "enabled": enabled,
+ "device_id": device_id,
},
desc="add_pusher",
lock=False,
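
The two comments above (the boolean coercion added before `yield PusherConfig(**r)` and the inline `COALESCE(enabled, TRUE)` in `get_pushers_by`) boil down to the following standalone sketch — hypothetical code, not part of Synapse — run against an in-memory SQLite database:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE pushers (id INTEGER PRIMARY KEY, enabled BOOLEAN)")
conn.executemany(
    "INSERT INTO pushers (id, enabled) VALUES (?, ?)",
    # NULL models a pusher created before the `enabled` column existed.
    [(1, None), (2, 0), (3, 1)],
)

rows = conn.execute(
    "SELECT id, COALESCE(enabled, TRUE) AS enabled FROM pushers"
).fetchall()
print(rows)  # [(1, 1), (2, 0), (3, 1)] -- SQLite hands booleans back as integers

# hence the explicit bool() coercion before the values reach clients
print([(pusher_id, bool(enabled)) for pusher_id, enabled in rows])
# [(1, True), (2, False), (3, True)]
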
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index ddb8e80b69..52fe0db924 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -540,7 +540,9 @@ class ReceiptsWorkerStore(SQLBaseStore):
async def get_all_updated_receipts(
self, instance_name: str, last_id: int, current_id: int, limit: int
- ) -> Tuple[List[Tuple[int, list]], int, bool]:
+ ) -> Tuple[
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], int, bool
+ ]:
"""Get updates for receipts replication stream.
Args:
@@ -567,9 +569,13 @@ class ReceiptsWorkerStore(SQLBaseStore):
def get_all_updated_receipts_txn(
txn: LoggingTransaction,
- ) -> Tuple[List[Tuple[int, list]], int, bool]:
+ ) -> Tuple[
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
+ int,
+ bool,
+ ]:
sql = """
- SELECT stream_id, room_id, receipt_type, user_id, event_id, data
+ SELECT stream_id, room_id, receipt_type, user_id, event_id, thread_id, data
FROM receipts_linearized
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@@ -578,8 +584,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
txn.execute(sql, (last_id, current_id, limit))
updates = cast(
- List[Tuple[int, list]],
- [(r[0], r[1:5] + (db_to_json(r[5]),)) for r in txn],
+ List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]],
+ [(r[0], r[1:6] + (db_to_json(r[6]),)) for r in txn],
)
limited = False
@@ -631,6 +637,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_id: str,
+ thread_id: Optional[str],
data: JsonDict,
stream_id: int,
) -> Optional[int]:
@@ -657,12 +664,27 @@ class ReceiptsWorkerStore(SQLBaseStore):
# We don't want to clobber receipts for more recent events, so we
# have to compare orderings of existing receipts
if stream_ordering is not None:
- sql = (
- "SELECT stream_ordering, event_id FROM events"
- " INNER JOIN receipts_linearized AS r USING (event_id, room_id)"
- " WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ?"
+ if thread_id is None:
+ thread_clause = "r.thread_id IS NULL"
+ thread_args: Tuple[str, ...] = ()
+ else:
+ thread_clause = "r.thread_id = ?"
+ thread_args = (thread_id,)
+
+ sql = f"""
+ SELECT stream_ordering, event_id FROM events
+ INNER JOIN receipts_linearized AS r USING (event_id, room_id)
+ WHERE r.room_id = ? AND r.receipt_type = ? AND r.user_id = ? AND {thread_clause}
+ """
+ txn.execute(
+ sql,
+ (
+ room_id,
+ receipt_type,
+ user_id,
+ )
+ + thread_args,
)
- txn.execute(sql, (room_id, receipt_type, user_id))
for so, eid in txn:
if int(so) >= stream_ordering:
@@ -682,21 +704,28 @@ class ReceiptsWorkerStore(SQLBaseStore):
self._receipts_stream_cache.entity_has_changed, room_id, stream_id
)
+ keyvalues = {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ where_clause = ""
+ if thread_id is None:
+ where_clause = "thread_id IS NULL"
+ else:
+ keyvalues["thread_id"] = thread_id
+
self.db_pool.simple_upsert_txn(
txn,
table="receipts_linearized",
- keyvalues={
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- },
+ keyvalues=keyvalues,
values={
"stream_id": stream_id,
"event_id": event_id,
"event_stream_ordering": stream_ordering,
"data": json_encoder.encode(data),
- "thread_id": None,
},
+ where_clause=where_clause,
# receipts_linearized has a unique constraint on
# (user_id, room_id, receipt_type), so no need to lock
lock=False,
@@ -748,6 +777,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_ids: List[str],
+ thread_id: Optional[str],
data: dict,
) -> Optional[Tuple[int, int]]:
"""Insert a receipt, either from local client or remote server.
@@ -780,6 +810,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type,
user_id,
linearized_event_id,
+ thread_id,
data,
stream_id=stream_id,
# Read committed is actually beneficial here because we check for a receipt with
@@ -794,7 +825,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
now = self._clock.time_msec()
logger.debug(
- "RR for event %s in %s (%i ms old)",
+ "Receipt %s for event %s in %s (%i ms old)",
+ receipt_type,
linearized_event_id,
room_id,
now - event_ts,
@@ -807,6 +839,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type,
user_id,
event_ids,
+ thread_id,
data,
)
@@ -821,6 +854,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
receipt_type: str,
user_id: str,
event_ids: List[str],
+ thread_id: Optional[str],
data: JsonDict,
) -> None:
assert self._can_write_to_receipts
@@ -832,19 +866,26 @@ class ReceiptsWorkerStore(SQLBaseStore):
# FIXME: This shouldn't invalidate the whole cache
txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))
+ keyvalues = {
+ "room_id": room_id,
+ "receipt_type": receipt_type,
+ "user_id": user_id,
+ }
+ where_clause = ""
+ if thread_id is None:
+ where_clause = "thread_id IS NULL"
+ else:
+ keyvalues["thread_id"] = thread_id
+
self.db_pool.simple_upsert_txn(
txn,
table="receipts_graph",
- keyvalues={
- "room_id": room_id,
- "receipt_type": receipt_type,
- "user_id": user_id,
- },
+ keyvalues=keyvalues,
values={
"event_ids": json_encoder.encode(event_ids),
"data": json_encoder.encode(data),
- "thread_id": None,
},
+ where_clause=where_clause,
# receipts_graph has a unique constraint on
# (user_id, room_id, receipt_type), so no need to lock
lock=False,
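
The `keyvalues`/`where_clause` split used above for both `receipts_linearized` and `receipts_graph` exists because SQL NULLs never compare equal: an unthreaded receipt cannot be addressed with a `thread_id = NULL` key, only with an explicit `thread_id IS NULL` filter. A minimal sketch of that behaviour, using plain sqlite3 and a hypothetical table, not Synapse code:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE receipts (room_id TEXT, user_id TEXT, thread_id TEXT,"
    " UNIQUE (room_id, user_id, thread_id))"
)
conn.execute("INSERT INTO receipts VALUES ('!room', '@alice', NULL)")
# No conflict: the unique constraint treats NULLs as distinct values.
conn.execute("INSERT INTO receipts VALUES ('!room', '@alice', NULL)")

print(conn.execute("SELECT COUNT(*) FROM receipts WHERE thread_id = NULL").fetchone())
# (0,) -- `= NULL` never matches anything
print(conn.execute("SELECT COUNT(*) FROM receipts WHERE thread_id IS NULL").fetchone())
# (2,) -- only `IS NULL` does
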
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 255854cd66..64cba763c4 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -52,6 +52,8 @@ class _RelatedEvent:
event_id: str
# The sender of the related event.
sender: str
+ topological_ordering: Optional[int]
+ stream_ordering: int
class RelationsWorkerStore(SQLBaseStore):
@@ -92,6 +94,9 @@ class RelationsWorkerStore(SQLBaseStore):
# it. The `event_id` must match the `event.event_id`.
assert event.event_id == event_id
+ # Ensure bad limits aren't being passed in.
+ assert limit >= 0
+
where_clause = ["relates_to_id = ?", "room_id = ?"]
where_args: List[Union[str, int]] = [event.event_id, room_id]
is_redacted = event.internal_metadata.is_redacted()
@@ -140,21 +145,34 @@ class RelationsWorkerStore(SQLBaseStore):
) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
txn.execute(sql, where_args + [limit + 1])
- last_topo_id = None
- last_stream_id = None
events = []
- for row in txn:
+ for event_id, relation_type, sender, topo_ordering, stream_ordering in txn:
# Do not include edits for redacted events as they leak event
# content.
- if not is_redacted or row[1] != RelationTypes.REPLACE:
- events.append(_RelatedEvent(row[0], row[2]))
- last_topo_id = row[3]
- last_stream_id = row[4]
+ if not is_redacted or relation_type != RelationTypes.REPLACE:
+ events.append(
+ _RelatedEvent(event_id, sender, topo_ordering, stream_ordering)
+ )
- # If there are more events, generate the next pagination key.
+ # If there are more events, generate the next pagination key from the
+ # last event returned.
next_token = None
- if len(events) > limit and last_topo_id and last_stream_id:
- next_key = RoomStreamToken(last_topo_id, last_stream_id)
+ if len(events) > limit:
+ # Instead of using the last row (which tells us there is more
+ # data), use the last row to be returned.
+ events = events[:limit]
+
+ topo = events[-1].topological_ordering
+ token = events[-1].stream_ordering
+ if direction == "b":
+ # Tokens are positions between events.
+ # This token points *after* the last event in the chunk.
+ # We need it to point to the event before it in the chunk
+ # when we are going backwards so we subtract one from the
+ # stream part.
+ token -= 1
+ next_key = RoomStreamToken(topo, token)
+
if from_token:
next_token = from_token.copy_and_replace(
StreamKeyType.ROOM, next_key
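
A hypothetical sketch of the token arithmetic introduced above (the same adjustment appears in `synapse/storage/databases/main/stream.py` below): stream tokens name positions between events, so when paginating backwards the next token has to sit just before the last event returned.

from typing import Optional, Tuple

def next_pagination_position(
    topological_ordering: Optional[int], stream_ordering: int, direction: str
) -> Tuple[Optional[int], int]:
    token = stream_ordering
    if direction == "b":
        # Backwards pagination: point just before the last returned event.
        token -= 1
    return (topological_ordering, token)

# last returned event has orderings (7, 42):
assert next_pagination_position(7, 42, "b") == (7, 41)
assert next_pagination_position(7, 42, "f") == (7, 42)
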
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index bef66f1992..5dd116d766 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -25,6 +25,7 @@ from typing import (
List,
Mapping,
Optional,
+ Sequence,
Tuple,
Union,
cast,
@@ -1133,6 +1134,22 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
get_rooms_for_retention_period_in_range_txn,
)
+ async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]:
+ """Gets the list of servers in a partial state room at the time we joined it.
+
+ Returns:
+ The `servers_in_room` list from the `/send_join` response for partial state
+ rooms. May not be accurate or complete, as it comes from a remote
+ homeserver.
+ An empty list for full state rooms.
+ """
+ return await self.db_pool.simple_select_onecol(
+ "partial_state_rooms_servers",
+ keyvalues={"room_id": room_id},
+ retcol="server_name",
+ desc="get_partial_state_servers_at_join",
+ )
+
async def get_partial_state_rooms_and_servers(
self,
) -> Mapping[str, Collection[str]]:
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index f0b179eea5..323c7bf7a5 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -1334,15 +1334,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
if rows:
topo = rows[-1].topological_ordering
- toke = rows[-1].stream_ordering
+ token = rows[-1].stream_ordering
if direction == "b":
# Tokens are positions between events.
# This token points *after* the last event in the chunk.
# We need it to point to the event before it in the chunk
# when we are going backwards so we subtract one from the
# stream part.
- toke -= 1
- next_token = RoomStreamToken(topo, toke)
+ token -= 1
+ next_token = RoomStreamToken(topo, token)
else:
# TODO (erikj): We should work out what to do here instead.
next_token = to_token if to_token else from_token
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 0d16a419a4..70e594a68f 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -32,9 +32,10 @@ class IncorrectDatabaseSetup(RuntimeError):
ConnectionType = TypeVar("ConnectionType", bound=Connection)
+CursorType = TypeVar("CursorType", bound=Cursor)
-class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
+class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCMeta):
def __init__(self, module: DBAPI2Module, config: Mapping[str, Any]):
self.module = module
@@ -64,7 +65,7 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
...
@abc.abstractmethod
- def check_new_database(self, txn: Cursor) -> None:
+ def check_new_database(self, txn: CursorType) -> None:
"""Gets called when setting up a brand new database. This allows us to
apply stricter checks on new databases versus existing databases.
"""
@@ -124,3 +125,21 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
Note: This has no effect on SQLite3, as transactions are SERIALIZABLE by default.
"""
...
+
+ @staticmethod
+ @abc.abstractmethod
+ def executescript(cursor: CursorType, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ This is not provided by DBAPI2, and so needs engine-specific support.
+ """
+ ...
+
+ @classmethod
+ def execute_script_file(cls, cursor: CursorType, filepath: str) -> None:
+ """Execute a file containing multiple semicolon-delimited SQL statements.
+
+ This is not provided by DBAPI2, and so needs engine-specific support.
+ """
+ with open(filepath, "rt") as f:
+ cls.executescript(cursor, f.read())
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 7f7d006ac2..d8c0f64d9a 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -31,7 +31,9 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
+class PostgresEngine(
+ BaseDatabaseEngine[psycopg2.extensions.connection, psycopg2.extensions.cursor]
+):
def __init__(self, database_config: Mapping[str, Any]):
super().__init__(psycopg2, database_config)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
@@ -212,3 +214,11 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
else:
isolation_level = self.isolation_level_map[isolation_level]
return conn.set_isolation_level(isolation_level)
+
+ @staticmethod
+ def executescript(cursor: psycopg2.extensions.cursor, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ Psycopg2 seems happy to do this in DBAPI2's `execute()` function.
+ """
+ cursor.execute(script)
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 095ae0a096..faa574dbfd 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -24,7 +24,7 @@ if TYPE_CHECKING:
from synapse.storage.database import LoggingDatabaseConnection
-class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
+class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
def __init__(self, database_config: Mapping[str, Any]):
super().__init__(sqlite3, database_config)
@@ -120,6 +120,25 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
# All transactions are SERIALIZABLE by default in sqlite
pass
+ @staticmethod
+ def executescript(cursor: sqlite3.Cursor, script: str) -> None:
+ """Execute a chunk of SQL containing multiple semicolon-delimited statements.
+
+ Python's built-in SQLite driver does not allow you to do this with DBAPI2's
+ `execute`:
+
+ > execute() will only execute a single SQL statement. If you try to execute more
+ > than one statement with it, it will raise a Warning. Use executescript() if
+ > you want to execute multiple SQL statements with one call.
+
+ Though the docs for `executescript` warn:
+
+ > If there is a pending transaction, an implicit COMMIT statement is executed
+ > first. No other implicit transaction control is performed; any transaction
+ > control must be added to sql_script.
+ """
+ cursor.executescript(script)
+
# Following functions taken from: https://github.com/coleifer/peewee
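
A standalone sketch (assuming only Python's bundled sqlite3 module) of the behaviour the docstring above quotes: `execute()` rejects multi-statement scripts, while `executescript()` accepts them at the cost of implicitly committing any pending transaction first.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
script = "CREATE TABLE a (x INTEGER); CREATE TABLE b (y INTEGER);"

try:
    cur.execute(script)
except (sqlite3.Warning, sqlite3.ProgrammingError):
    # The exact exception class depends on the Python version, but either way
    # DBAPI2's execute() refuses to run more than one statement.
    pass

cur.executescript(script)  # fine: both tables exist afterwards
print(conn.execute("SELECT name FROM sqlite_master ORDER BY name").fetchall())
# [('a',), ('b',)]
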
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 09a2b58f4c..3acdb39da7 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -266,7 +266,7 @@ def _setup_new_database(
".sql." + specific
):
logger.debug("Applying schema %s", entry.absolute_path)
- executescript(cur, entry.absolute_path)
+ database_engine.execute_script_file(cur, entry.absolute_path)
cur.execute(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)",
@@ -517,7 +517,7 @@ def _upgrade_existing_database(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying schema %s", relative_path)
- executescript(cur, absolute_path)
+ database_engine.execute_script_file(cur, absolute_path)
elif ext == specific_engine_extension and root_name.endswith(".sql"):
# A .sql file specific to our engine; just read and execute it
if is_worker:
@@ -525,7 +525,7 @@ def _upgrade_existing_database(
UNAPPLIED_DELTA_ON_WORKER_ERROR % relative_path
)
logger.info("Applying engine-specific schema %s", relative_path)
- executescript(cur, absolute_path)
+ database_engine.execute_script_file(cur, absolute_path)
elif ext in specific_engine_extensions and root_name.endswith(".sql"):
# A .sql file for a different engine; skip it.
continue
@@ -666,7 +666,7 @@ def _get_or_create_schema_state(
) -> Optional[_SchemaState]:
# Bluntly try creating the schema_version tables.
sql_path = os.path.join(schema_path, "common", "schema_version.sql")
- executescript(txn, sql_path)
+ database_engine.execute_script_file(txn, sql_path)
txn.execute("SELECT version, upgraded FROM schema_version")
row = txn.fetchone()
diff --git a/synapse/storage/schema/common/full_schemas/72/full.sql.postgres b/synapse/storage/schema/common/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..f0e546f052
--- /dev/null
+++ b/synapse/storage/schema/common/full_schemas/72/full.sql.postgres
@@ -0,0 +1,8 @@
+CREATE TABLE background_updates (
+ update_name text NOT NULL,
+ progress_json text NOT NULL,
+ depends_on text,
+ ordering integer DEFAULT 0 NOT NULL
+);
+ALTER TABLE ONLY background_updates
+ ADD CONSTRAINT background_updates_uniqueness UNIQUE (update_name);
diff --git a/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..d5a2c04aa9
--- /dev/null
+++ b/synapse/storage/schema/common/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,6 @@
+CREATE TABLE background_updates (
+ update_name text NOT NULL,
+ progress_json text NOT NULL,
+ depends_on text, ordering INT NOT NULL DEFAULT 0,
+ CONSTRAINT background_updates_uniqueness UNIQUE (update_name)
+);
diff --git a/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql
new file mode 100644
index 0000000000..dba3b4900b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql
@@ -0,0 +1,16 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE pushers ADD COLUMN enabled BOOLEAN;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/73/03pusher_device_id.sql b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql
new file mode 100644
index 0000000000..1b4ffbeebe
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a device_id column to track the device ID that created the pusher. It's NULLable
+-- on purpose, because a) it might not be possible to track down the device that created
+-- old pushers (pushers.access_token and access_tokens.device_id are both NULLable), and
+-- b) access tokens retrieved via the admin API don't have a device associated with them.
+ALTER TABLE pushers ADD COLUMN device_id TEXT;
\ No newline at end of file
diff --git a/synapse/storage/schema/main/full_schemas/72/full.sql.postgres b/synapse/storage/schema/main/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..d421fd9ab9
--- /dev/null
+++ b/synapse/storage/schema/main/full_schemas/72/full.sql.postgres
@@ -0,0 +1,1344 @@
+CREATE FUNCTION check_partial_state_events() RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ IF EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.room_id != NEW.room_id
+ ) THEN
+ RAISE EXCEPTION 'Incorrect room_id in partial_state_events';
+ END IF;
+ RETURN NEW;
+ END;
+ $$;
+CREATE TABLE access_tokens (
+ id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text,
+ token text NOT NULL,
+ valid_until_ms bigint,
+ puppets_user_id text,
+ last_validated bigint,
+ refresh_token_id bigint,
+ used boolean
+);
+CREATE TABLE account_data (
+ user_id text NOT NULL,
+ account_data_type text NOT NULL,
+ stream_id bigint NOT NULL,
+ content text NOT NULL,
+ instance_name text
+);
+CREATE SEQUENCE account_data_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE account_validity (
+ user_id text NOT NULL,
+ expiration_ts_ms bigint NOT NULL,
+ email_sent boolean NOT NULL,
+ renewal_token text,
+ token_used_ts_ms bigint
+);
+CREATE TABLE application_services_state (
+ as_id text NOT NULL,
+ state character varying(5),
+ read_receipt_stream_id bigint,
+ presence_stream_id bigint,
+ to_device_stream_id bigint,
+ device_list_stream_id bigint
+);
+CREATE SEQUENCE application_services_txn_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE application_services_txns (
+ as_id text NOT NULL,
+ txn_id bigint NOT NULL,
+ event_ids text NOT NULL
+);
+CREATE TABLE appservice_room_list (
+ appservice_id text NOT NULL,
+ network_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE appservice_stream_position (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_ordering bigint,
+ CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE batch_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ batch_id text NOT NULL
+);
+CREATE TABLE blocked_rooms (
+ room_id text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE cache_invalidation_stream_by_instance (
+ stream_id bigint NOT NULL,
+ instance_name text NOT NULL,
+ cache_func text NOT NULL,
+ keys text[],
+ invalidation_ts bigint
+);
+CREATE SEQUENCE cache_invalidation_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE current_state_delta_stream (
+ stream_id bigint NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ event_id text,
+ prev_event_id text,
+ instance_name text
+);
+CREATE TABLE current_state_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ membership text
+);
+CREATE TABLE dehydrated_devices (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ device_data text NOT NULL
+);
+CREATE TABLE deleted_pushers (
+ stream_id bigint NOT NULL,
+ app_id text NOT NULL,
+ pushkey text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE destination_rooms (
+ destination text NOT NULL,
+ room_id text NOT NULL,
+ stream_ordering bigint NOT NULL
+);
+CREATE TABLE destinations (
+ destination text NOT NULL,
+ retry_last_ts bigint,
+ retry_interval bigint,
+ failure_ts bigint,
+ last_successful_stream_ordering bigint
+);
+CREATE TABLE device_auth_providers (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ auth_provider_id text NOT NULL,
+ auth_provider_session_id text NOT NULL
+);
+CREATE TABLE device_federation_inbox (
+ origin text NOT NULL,
+ message_id text NOT NULL,
+ received_ts bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE device_federation_outbox (
+ destination text NOT NULL,
+ stream_id bigint NOT NULL,
+ queued_ts bigint NOT NULL,
+ messages_json text NOT NULL,
+ instance_name text
+);
+CREATE TABLE device_inbox (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ message_json text NOT NULL,
+ instance_name text
+);
+CREATE SEQUENCE device_inbox_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE device_lists_changes_in_room (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ room_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ converted_to_destinations boolean NOT NULL,
+ opentracing_context text
+);
+CREATE TABLE device_lists_outbound_last_success (
+ destination text NOT NULL,
+ user_id text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE device_lists_outbound_pokes (
+ destination text NOT NULL,
+ stream_id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ sent boolean NOT NULL,
+ ts bigint NOT NULL,
+ opentracing_context text
+);
+CREATE TABLE device_lists_remote_cache (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ content text NOT NULL
+);
+CREATE TABLE device_lists_remote_extremeties (
+ user_id text NOT NULL,
+ stream_id text NOT NULL
+);
+CREATE TABLE device_lists_remote_resync (
+ user_id text NOT NULL,
+ added_ts bigint NOT NULL
+);
+CREATE TABLE device_lists_stream (
+ stream_id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL
+);
+CREATE TABLE devices (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ display_name text,
+ last_seen bigint,
+ ip text,
+ user_agent text,
+ hidden boolean DEFAULT false
+);
+CREATE TABLE e2e_cross_signing_keys (
+ user_id text NOT NULL,
+ keytype text NOT NULL,
+ keydata text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE e2e_cross_signing_signatures (
+ user_id text NOT NULL,
+ key_id text NOT NULL,
+ target_user_id text NOT NULL,
+ target_device_id text NOT NULL,
+ signature text NOT NULL
+);
+CREATE TABLE e2e_device_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ key_json text NOT NULL
+);
+CREATE TABLE e2e_fallback_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ algorithm text NOT NULL,
+ key_id text NOT NULL,
+ key_json text NOT NULL,
+ used boolean DEFAULT false NOT NULL
+);
+CREATE TABLE e2e_one_time_keys_json (
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ algorithm text NOT NULL,
+ key_id text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ key_json text NOT NULL
+);
+CREATE TABLE e2e_room_keys (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ session_id text NOT NULL,
+ version bigint NOT NULL,
+ first_message_index integer,
+ forwarded_count integer,
+ is_verified boolean,
+ session_data text NOT NULL
+);
+CREATE TABLE e2e_room_keys_versions (
+ user_id text NOT NULL,
+ version bigint NOT NULL,
+ algorithm text NOT NULL,
+ auth_data text NOT NULL,
+ deleted smallint DEFAULT 0 NOT NULL,
+ etag bigint
+);
+CREATE TABLE erased_users (
+ user_id text NOT NULL
+);
+CREATE TABLE event_auth (
+ event_id text NOT NULL,
+ auth_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE SEQUENCE event_auth_chain_id
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE event_auth_chain_links (
+ origin_chain_id bigint NOT NULL,
+ origin_sequence_number bigint NOT NULL,
+ target_chain_id bigint NOT NULL,
+ target_sequence_number bigint NOT NULL
+);
+CREATE TABLE event_auth_chain_to_calculate (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL
+);
+CREATE TABLE event_auth_chains (
+ event_id text NOT NULL,
+ chain_id bigint NOT NULL,
+ sequence_number bigint NOT NULL
+);
+CREATE TABLE event_backward_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE event_edges (
+ event_id text NOT NULL,
+ prev_event_id text NOT NULL,
+ room_id text,
+ is_state boolean DEFAULT false NOT NULL
+);
+CREATE TABLE event_expiry (
+ event_id text NOT NULL,
+ expiry_ts bigint NOT NULL
+);
+CREATE TABLE event_forward_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE event_json (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ internal_metadata text NOT NULL,
+ json text NOT NULL,
+ format_version integer
+);
+CREATE TABLE event_labels (
+ event_id text NOT NULL,
+ label text NOT NULL,
+ room_id text NOT NULL,
+ topological_ordering bigint NOT NULL
+);
+CREATE TABLE event_push_actions (
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ profile_tag character varying(32),
+ actions text NOT NULL,
+ topological_ordering bigint,
+ stream_ordering bigint,
+ notif smallint,
+ highlight smallint,
+ unread smallint,
+ thread_id text
+);
+CREATE TABLE event_push_actions_staging (
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ actions text NOT NULL,
+ notif smallint NOT NULL,
+ highlight smallint NOT NULL,
+ unread smallint,
+ thread_id text
+);
+CREATE TABLE event_push_summary (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ notif_count bigint NOT NULL,
+ stream_ordering bigint NOT NULL,
+ unread_count bigint,
+ last_receipt_stream_ordering bigint,
+ thread_id text
+);
+CREATE TABLE event_push_summary_last_receipt_stream_id (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint NOT NULL,
+ CONSTRAINT event_push_summary_last_receipt_stream_id_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE event_push_summary_stream_ordering (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_ordering bigint NOT NULL,
+ CONSTRAINT event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE event_relations (
+ event_id text NOT NULL,
+ relates_to_id text NOT NULL,
+ relation_type text NOT NULL,
+ aggregation_key text
+);
+CREATE TABLE event_reports (
+ id bigint NOT NULL,
+ received_ts bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ reason text,
+ content text
+);
+CREATE TABLE event_search (
+ event_id text,
+ room_id text,
+ sender text,
+ key text,
+ vector tsvector,
+ origin_server_ts bigint,
+ stream_ordering bigint
+);
+CREATE TABLE event_to_state_groups (
+ event_id text NOT NULL,
+ state_group bigint NOT NULL
+);
+CREATE TABLE event_txn_id (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ user_id text NOT NULL,
+ token_id bigint NOT NULL,
+ txn_id text NOT NULL,
+ inserted_ts bigint NOT NULL
+);
+CREATE TABLE events (
+ topological_ordering bigint NOT NULL,
+ event_id text NOT NULL,
+ type text NOT NULL,
+ room_id text NOT NULL,
+ content text,
+ unrecognized_keys text,
+ processed boolean NOT NULL,
+ outlier boolean NOT NULL,
+ depth bigint DEFAULT 0 NOT NULL,
+ origin_server_ts bigint,
+ received_ts bigint,
+ sender text,
+ contains_url boolean,
+ instance_name text,
+ stream_ordering bigint,
+ state_key text,
+ rejection_reason text
+);
+CREATE SEQUENCE events_backfill_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE SEQUENCE events_stream_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE ex_outlier_stream (
+ event_stream_ordering bigint NOT NULL,
+ event_id text NOT NULL,
+ state_group bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE federation_inbound_events_staging (
+ origin text NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ received_ts bigint NOT NULL,
+ event_json text NOT NULL,
+ internal_metadata text NOT NULL
+);
+CREATE TABLE federation_stream_position (
+ type text NOT NULL,
+ stream_id bigint NOT NULL,
+ instance_name text DEFAULT 'master'::text NOT NULL
+);
+CREATE TABLE ignored_users (
+ ignorer_user_id text NOT NULL,
+ ignored_user_id text NOT NULL
+);
+CREATE TABLE insertion_event_edges (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ insertion_prev_event_id text NOT NULL
+);
+CREATE TABLE insertion_event_extremities (
+ event_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE insertion_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ next_batch_id text NOT NULL
+);
+CREATE TABLE instance_map (
+ instance_id integer NOT NULL,
+ instance_name text NOT NULL
+);
+CREATE SEQUENCE instance_map_instance_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+ALTER SEQUENCE instance_map_instance_id_seq OWNED BY instance_map.instance_id;
+CREATE TABLE local_current_membership (
+ room_id text NOT NULL,
+ user_id text NOT NULL,
+ event_id text NOT NULL,
+ membership text NOT NULL
+);
+CREATE TABLE local_media_repository (
+ media_id text,
+ media_type text,
+ media_length integer,
+ created_ts bigint,
+ upload_name text,
+ user_id text,
+ quarantined_by text,
+ url_cache text,
+ last_access_ts bigint,
+ safe_from_quarantine boolean DEFAULT false NOT NULL
+);
+CREATE TABLE local_media_repository_thumbnails (
+ media_id text,
+ thumbnail_width integer,
+ thumbnail_height integer,
+ thumbnail_type text,
+ thumbnail_method text,
+ thumbnail_length integer
+);
+CREATE TABLE local_media_repository_url_cache (
+ url text,
+ response_code integer,
+ etag text,
+ expires_ts bigint,
+ og text,
+ media_id text,
+ download_ts bigint
+);
+CREATE TABLE monthly_active_users (
+ user_id text NOT NULL,
+ "timestamp" bigint NOT NULL
+);
+CREATE TABLE open_id_tokens (
+ token text NOT NULL,
+ ts_valid_until_ms bigint NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE partial_state_events (
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE partial_state_rooms (
+ room_id text NOT NULL
+);
+CREATE TABLE partial_state_rooms_servers (
+ room_id text NOT NULL,
+ server_name text NOT NULL
+);
+CREATE TABLE presence (
+ user_id text NOT NULL,
+ state character varying(20),
+ status_msg text,
+ mtime bigint
+);
+CREATE TABLE presence_stream (
+ stream_id bigint,
+ user_id text,
+ state text,
+ last_active_ts bigint,
+ last_federation_update_ts bigint,
+ last_user_sync_ts bigint,
+ status_msg text,
+ currently_active boolean,
+ instance_name text
+);
+CREATE SEQUENCE presence_stream_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE profiles (
+ user_id text NOT NULL,
+ displayname text,
+ avatar_url text
+);
+CREATE TABLE push_rules (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ rule_id text NOT NULL,
+ priority_class smallint NOT NULL,
+ priority integer DEFAULT 0 NOT NULL,
+ conditions text NOT NULL,
+ actions text NOT NULL
+);
+CREATE TABLE push_rules_enable (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ rule_id text NOT NULL,
+ enabled smallint
+);
+CREATE TABLE push_rules_stream (
+ stream_id bigint NOT NULL,
+ event_stream_ordering bigint NOT NULL,
+ user_id text NOT NULL,
+ rule_id text NOT NULL,
+ op text NOT NULL,
+ priority_class smallint,
+ priority integer,
+ conditions text,
+ actions text
+);
+CREATE TABLE pusher_throttle (
+ pusher bigint NOT NULL,
+ room_id text NOT NULL,
+ last_sent_ts bigint,
+ throttle_ms bigint
+);
+CREATE TABLE pushers (
+ id bigint NOT NULL,
+ user_name text NOT NULL,
+ access_token bigint,
+ profile_tag text NOT NULL,
+ kind text NOT NULL,
+ app_id text NOT NULL,
+ app_display_name text NOT NULL,
+ device_display_name text NOT NULL,
+ pushkey text NOT NULL,
+ ts bigint NOT NULL,
+ lang text,
+ data text,
+ last_stream_ordering bigint,
+ last_success bigint,
+ failing_since bigint
+);
+CREATE TABLE ratelimit_override (
+ user_id text NOT NULL,
+ messages_per_second bigint,
+ burst_count bigint
+);
+CREATE TABLE receipts_graph (
+ room_id text NOT NULL,
+ receipt_type text NOT NULL,
+ user_id text NOT NULL,
+ event_ids text NOT NULL,
+ data text NOT NULL,
+ thread_id text
+);
+CREATE TABLE receipts_linearized (
+ stream_id bigint NOT NULL,
+ room_id text NOT NULL,
+ receipt_type text NOT NULL,
+ user_id text NOT NULL,
+ event_id text NOT NULL,
+ data text NOT NULL,
+ instance_name text,
+ event_stream_ordering bigint,
+ thread_id text
+);
+CREATE SEQUENCE receipts_sequence
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE received_transactions (
+ transaction_id text,
+ origin text,
+ ts bigint,
+ response_code integer,
+ response_json bytea,
+ has_been_referenced smallint DEFAULT 0
+);
+CREATE TABLE redactions (
+ event_id text NOT NULL,
+ redacts text NOT NULL,
+ have_censored boolean DEFAULT false NOT NULL,
+ received_ts bigint
+);
+CREATE TABLE refresh_tokens (
+ id bigint NOT NULL,
+ user_id text NOT NULL,
+ device_id text NOT NULL,
+ token text NOT NULL,
+ next_token_id bigint,
+ expiry_ts bigint,
+ ultimate_session_expiry_ts bigint
+);
+CREATE TABLE registration_tokens (
+ token text NOT NULL,
+ uses_allowed integer,
+ pending integer NOT NULL,
+ completed integer NOT NULL,
+ expiry_time bigint
+);
+CREATE TABLE rejections (
+ event_id text NOT NULL,
+ reason text NOT NULL,
+ last_check text NOT NULL
+);
+CREATE TABLE remote_media_cache (
+ media_origin text,
+ media_id text,
+ media_type text,
+ created_ts bigint,
+ upload_name text,
+ media_length integer,
+ filesystem_id text,
+ last_access_ts bigint,
+ quarantined_by text
+);
+CREATE TABLE remote_media_cache_thumbnails (
+ media_origin text,
+ media_id text,
+ thumbnail_width integer,
+ thumbnail_height integer,
+ thumbnail_method text,
+ thumbnail_type text,
+ thumbnail_length integer,
+ filesystem_id text
+);
+CREATE TABLE room_account_data (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ account_data_type text NOT NULL,
+ stream_id bigint NOT NULL,
+ content text NOT NULL,
+ instance_name text
+);
+CREATE TABLE room_alias_servers (
+ room_alias text NOT NULL,
+ server text NOT NULL
+);
+CREATE TABLE room_aliases (
+ room_alias text NOT NULL,
+ room_id text NOT NULL,
+ creator text
+);
+CREATE TABLE room_depth (
+ room_id text NOT NULL,
+ min_depth bigint
+);
+CREATE TABLE room_memberships (
+ event_id text NOT NULL,
+ user_id text NOT NULL,
+ sender text NOT NULL,
+ room_id text NOT NULL,
+ membership text NOT NULL,
+ forgotten integer DEFAULT 0,
+ display_name text,
+ avatar_url text
+);
+CREATE TABLE room_retention (
+ room_id text NOT NULL,
+ event_id text NOT NULL,
+ min_lifetime bigint,
+ max_lifetime bigint
+);
+CREATE TABLE room_stats_current (
+ room_id text NOT NULL,
+ current_state_events integer NOT NULL,
+ joined_members integer NOT NULL,
+ invited_members integer NOT NULL,
+ left_members integer NOT NULL,
+ banned_members integer NOT NULL,
+ local_users_in_room integer NOT NULL,
+ completed_delta_stream_id bigint NOT NULL,
+ knocked_members integer
+);
+CREATE TABLE room_stats_earliest_token (
+ room_id text NOT NULL,
+ token bigint NOT NULL
+);
+CREATE TABLE room_stats_state (
+ room_id text NOT NULL,
+ name text,
+ canonical_alias text,
+ join_rules text,
+ history_visibility text,
+ encryption text,
+ avatar text,
+ guest_access text,
+ is_federatable boolean,
+ topic text,
+ room_type text
+);
+CREATE TABLE room_tags (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ tag text NOT NULL,
+ content text NOT NULL
+);
+CREATE TABLE room_tags_revisions (
+ user_id text NOT NULL,
+ room_id text NOT NULL,
+ stream_id bigint NOT NULL,
+ instance_name text
+);
+CREATE TABLE rooms (
+ room_id text NOT NULL,
+ is_public boolean,
+ creator text,
+ room_version text,
+ has_auth_chain_index boolean
+);
+CREATE TABLE server_keys_json (
+ server_name text NOT NULL,
+ key_id text NOT NULL,
+ from_server text NOT NULL,
+ ts_added_ms bigint NOT NULL,
+ ts_valid_until_ms bigint NOT NULL,
+ key_json bytea NOT NULL
+);
+CREATE TABLE server_signature_keys (
+ server_name text,
+ key_id text,
+ from_server text,
+ ts_added_ms bigint,
+ verify_key bytea,
+ ts_valid_until_ms bigint
+);
+CREATE TABLE sessions (
+ session_type text NOT NULL,
+ session_id text NOT NULL,
+ value text NOT NULL,
+ expiry_time_ms bigint NOT NULL
+);
+CREATE TABLE state_events (
+ event_id text NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ prev_state text
+);
+CREATE TABLE stats_incremental_position (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint NOT NULL,
+ CONSTRAINT stats_incremental_position_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE stream_ordering_to_exterm (
+ stream_ordering bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE stream_positions (
+ stream_name text NOT NULL,
+ instance_name text NOT NULL,
+ stream_id bigint NOT NULL
+);
+CREATE TABLE threepid_guest_access_tokens (
+ medium text,
+ address text,
+ guest_access_token text,
+ first_inviter text
+);
+CREATE TABLE threepid_validation_session (
+ session_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ client_secret text NOT NULL,
+ last_send_attempt bigint NOT NULL,
+ validated_at bigint
+);
+CREATE TABLE threepid_validation_token (
+ token text NOT NULL,
+ session_id text NOT NULL,
+ next_link text,
+ expires bigint NOT NULL
+);
+CREATE TABLE ui_auth_sessions (
+ session_id text NOT NULL,
+ creation_time bigint NOT NULL,
+ serverdict text NOT NULL,
+ clientdict text NOT NULL,
+ uri text NOT NULL,
+ method text NOT NULL,
+ description text NOT NULL
+);
+CREATE TABLE ui_auth_sessions_credentials (
+ session_id text NOT NULL,
+ stage_type text NOT NULL,
+ result text NOT NULL
+);
+CREATE TABLE ui_auth_sessions_ips (
+ session_id text NOT NULL,
+ ip text NOT NULL,
+ user_agent text NOT NULL
+);
+CREATE TABLE user_daily_visits (
+ user_id text NOT NULL,
+ device_id text,
+ "timestamp" bigint NOT NULL,
+ user_agent text
+);
+CREATE TABLE user_directory (
+ user_id text NOT NULL,
+ room_id text,
+ display_name text,
+ avatar_url text
+);
+CREATE TABLE user_directory_search (
+ user_id text NOT NULL,
+ vector tsvector
+);
+CREATE TABLE user_directory_stream_pos (
+ lock character(1) DEFAULT 'X'::bpchar NOT NULL,
+ stream_id bigint,
+ CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar))
+);
+CREATE TABLE user_external_ids (
+ auth_provider text NOT NULL,
+ external_id text NOT NULL,
+ user_id text NOT NULL
+);
+CREATE TABLE user_filters (
+ user_id text NOT NULL,
+ filter_id bigint NOT NULL,
+ filter_json bytea NOT NULL
+);
+CREATE SEQUENCE user_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE user_ips (
+ user_id text NOT NULL,
+ access_token text NOT NULL,
+ device_id text,
+ ip text NOT NULL,
+ user_agent text NOT NULL,
+ last_seen bigint NOT NULL
+);
+CREATE TABLE user_signature_stream (
+ stream_id bigint NOT NULL,
+ from_user_id text NOT NULL,
+ user_ids text NOT NULL
+);
+CREATE TABLE user_stats_current (
+ user_id text NOT NULL,
+ joined_rooms bigint NOT NULL,
+ completed_delta_stream_id bigint NOT NULL
+);
+CREATE TABLE user_threepid_id_server (
+ user_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ id_server text NOT NULL
+);
+CREATE TABLE user_threepids (
+ user_id text NOT NULL,
+ medium text NOT NULL,
+ address text NOT NULL,
+ validated_at bigint NOT NULL,
+ added_at bigint NOT NULL
+);
+CREATE TABLE users (
+ name text,
+ password_hash text,
+ creation_ts bigint,
+ admin smallint DEFAULT 0 NOT NULL,
+ upgrade_ts bigint,
+ is_guest smallint DEFAULT 0 NOT NULL,
+ appservice_id text,
+ consent_version text,
+ consent_server_notice_sent text,
+ user_type text,
+ deactivated smallint DEFAULT 0 NOT NULL,
+ shadow_banned boolean,
+ consent_ts bigint
+);
+CREATE TABLE users_in_public_rooms (
+ user_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE users_pending_deactivation (
+ user_id text NOT NULL
+);
+CREATE TABLE users_to_send_full_presence_to (
+ user_id text NOT NULL,
+ presence_stream_id bigint
+);
+CREATE TABLE users_who_share_private_rooms (
+ user_id text NOT NULL,
+ other_user_id text NOT NULL,
+ room_id text NOT NULL
+);
+CREATE TABLE worker_locks (
+ lock_name text NOT NULL,
+ lock_key text NOT NULL,
+ instance_name text NOT NULL,
+ token text NOT NULL,
+ last_renewed_ts bigint NOT NULL
+);
+ALTER TABLE ONLY instance_map ALTER COLUMN instance_id SET DEFAULT nextval('instance_map_instance_id_seq'::regclass);
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY account_data
+ ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type);
+ALTER TABLE ONLY account_validity
+ ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY application_services_state
+ ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id);
+ALTER TABLE ONLY application_services_txns
+ ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id);
+ALTER TABLE ONLY appservice_stream_position
+ ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock);
+ALTER TABLE ONLY current_state_events
+ ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY current_state_events
+ ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key);
+ALTER TABLE ONLY dehydrated_devices
+ ADD CONSTRAINT dehydrated_devices_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_pkey PRIMARY KEY (destination, room_id);
+ALTER TABLE ONLY destinations
+ ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination);
+ALTER TABLE ONLY devices
+ ADD CONSTRAINT device_uniqueness UNIQUE (user_id, device_id);
+ALTER TABLE ONLY e2e_device_keys_json
+ ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id);
+ALTER TABLE ONLY e2e_fallback_keys_json
+ ADD CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm);
+ALTER TABLE ONLY e2e_one_time_keys_json
+ ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id);
+ALTER TABLE ONLY event_auth_chain_to_calculate
+ ADD CONSTRAINT event_auth_chain_to_calculate_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_auth_chains
+ ADD CONSTRAINT event_auth_chains_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_backward_extremities
+ ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+ALTER TABLE ONLY event_expiry
+ ADD CONSTRAINT event_expiry_pkey PRIMARY KEY (event_id);
+ALTER TABLE ONLY event_forward_extremities
+ ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id);
+ALTER TABLE ONLY event_push_actions
+ ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag);
+ALTER TABLE ONLY event_json
+ ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY event_labels
+ ADD CONSTRAINT event_labels_pkey PRIMARY KEY (event_id, label);
+ALTER TABLE ONLY event_push_summary_last_receipt_stream_id
+ ADD CONSTRAINT event_push_summary_last_receipt_stream_id_lock_key UNIQUE (lock);
+ALTER TABLE ONLY event_push_summary_stream_ordering
+ ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock);
+ALTER TABLE ONLY event_reports
+ ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY event_to_state_groups
+ ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY events
+ ADD CONSTRAINT events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY ex_outlier_stream
+ ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering);
+ALTER TABLE ONLY instance_map
+ ADD CONSTRAINT instance_map_pkey PRIMARY KEY (instance_id);
+ALTER TABLE ONLY local_media_repository
+ ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id);
+ALTER TABLE ONLY user_threepids
+ ADD CONSTRAINT medium_address UNIQUE (medium, address);
+ALTER TABLE ONLY open_id_tokens
+ ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token);
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY partial_state_rooms
+ ADD CONSTRAINT partial_state_rooms_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY partial_state_rooms_servers
+ ADD CONSTRAINT partial_state_rooms_servers_room_id_server_name_key UNIQUE (room_id, server_name);
+ALTER TABLE ONLY presence
+ ADD CONSTRAINT presence_user_id_key UNIQUE (user_id);
+ALTER TABLE ONLY profiles
+ ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id);
+ALTER TABLE ONLY push_rules_enable
+ ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY push_rules_enable
+ ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id);
+ALTER TABLE ONLY push_rules
+ ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY push_rules
+ ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id);
+ALTER TABLE ONLY pusher_throttle
+ ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id);
+ALTER TABLE ONLY pushers
+ ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name);
+ALTER TABLE ONLY pushers
+ ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY receipts_graph
+ ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id);
+ALTER TABLE ONLY receipts_graph
+ ADD CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id);
+ALTER TABLE ONLY receipts_linearized
+ ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id);
+ALTER TABLE ONLY receipts_linearized
+ ADD CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id);
+ALTER TABLE ONLY received_transactions
+ ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin);
+ALTER TABLE ONLY redactions
+ ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY registration_tokens
+ ADD CONSTRAINT registration_tokens_token_key UNIQUE (token);
+ALTER TABLE ONLY rejections
+ ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY remote_media_cache
+ ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id);
+ALTER TABLE ONLY room_account_data
+ ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type);
+ALTER TABLE ONLY room_aliases
+ ADD CONSTRAINT room_aliases_room_alias_key UNIQUE (room_alias);
+ALTER TABLE ONLY room_depth
+ ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id);
+ALTER TABLE ONLY room_memberships
+ ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY room_retention
+ ADD CONSTRAINT room_retention_pkey PRIMARY KEY (room_id, event_id);
+ALTER TABLE ONLY room_stats_current
+ ADD CONSTRAINT room_stats_current_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY room_tags_revisions
+ ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id);
+ALTER TABLE ONLY room_tags
+ ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag);
+ALTER TABLE ONLY rooms
+ ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id);
+ALTER TABLE ONLY server_keys_json
+ ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server);
+ALTER TABLE ONLY server_signature_keys
+ ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id);
+ALTER TABLE ONLY sessions
+ ADD CONSTRAINT sessions_session_type_session_id_key UNIQUE (session_type, session_id);
+ALTER TABLE ONLY state_events
+ ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id);
+ALTER TABLE ONLY stats_incremental_position
+ ADD CONSTRAINT stats_incremental_position_lock_key UNIQUE (lock);
+ALTER TABLE ONLY threepid_validation_session
+ ADD CONSTRAINT threepid_validation_session_pkey PRIMARY KEY (session_id);
+ALTER TABLE ONLY threepid_validation_token
+ ADD CONSTRAINT threepid_validation_token_pkey PRIMARY KEY (token);
+ALTER TABLE ONLY ui_auth_sessions_credentials
+ ADD CONSTRAINT ui_auth_sessions_credentials_session_id_stage_type_key UNIQUE (session_id, stage_type);
+ALTER TABLE ONLY ui_auth_sessions_ips
+ ADD CONSTRAINT ui_auth_sessions_ips_session_id_ip_user_agent_key UNIQUE (session_id, ip, user_agent);
+ALTER TABLE ONLY ui_auth_sessions
+ ADD CONSTRAINT ui_auth_sessions_session_id_key UNIQUE (session_id);
+ALTER TABLE ONLY user_directory_stream_pos
+ ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock);
+ALTER TABLE ONLY user_external_ids
+ ADD CONSTRAINT user_external_ids_auth_provider_external_id_key UNIQUE (auth_provider, external_id);
+ALTER TABLE ONLY user_stats_current
+ ADD CONSTRAINT user_stats_current_pkey PRIMARY KEY (user_id);
+ALTER TABLE ONLY users
+ ADD CONSTRAINT users_name_key UNIQUE (name);
+ALTER TABLE ONLY users_to_send_full_presence_to
+ ADD CONSTRAINT users_to_send_full_presence_to_pkey PRIMARY KEY (user_id);
+CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id);
+CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id);
+CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id);
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id);
+CREATE INDEX batch_events_batch_id ON batch_events USING btree (batch_id);
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id);
+CREATE UNIQUE INDEX cache_invalidation_stream_by_instance_id ON cache_invalidation_stream_by_instance USING btree (stream_id);
+CREATE INDEX cache_invalidation_stream_by_instance_instance_index ON cache_invalidation_stream_by_instance USING btree (instance_name, stream_id);
+CREATE UNIQUE INDEX chunk_events_event_id ON batch_events USING btree (event_id);
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id);
+CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text);
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id);
+CREATE INDEX destination_rooms_room_id ON destination_rooms USING btree (room_id);
+CREATE INDEX device_auth_providers_devices ON device_auth_providers USING btree (user_id, device_id);
+CREATE INDEX device_auth_providers_sessions ON device_auth_providers USING btree (auth_provider_id, auth_provider_session_id);
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id);
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id);
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id);
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id);
+CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id);
+CREATE UNIQUE INDEX device_lists_changes_in_stream_id ON device_lists_changes_in_room USING btree (stream_id, room_id);
+CREATE INDEX device_lists_changes_in_stream_id_unconverted ON device_lists_changes_in_room USING btree (stream_id) WHERE (NOT converted_to_destinations);
+CREATE UNIQUE INDEX device_lists_outbound_last_success_unique_idx ON device_lists_outbound_last_success USING btree (destination, user_id);
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id);
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id);
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id);
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id);
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id);
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync USING btree (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync USING btree (added_ts);
+CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id);
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING btree (user_id, device_id);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_idx ON e2e_cross_signing_keys USING btree (user_id, keytype, stream_id);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_stream_idx ON e2e_cross_signing_keys USING btree (stream_id);
+CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures USING btree (user_id, target_user_id, target_device_id);
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version);
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys USING btree (user_id, version, room_id, session_id);
+CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id);
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id);
+CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id);
+CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id);
+CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id);
+CREATE INDEX event_auth_chain_links_idx ON event_auth_chain_links USING btree (origin_chain_id, target_chain_id);
+CREATE INDEX event_auth_chain_to_calculate_rm_id ON event_auth_chain_to_calculate USING btree (room_id);
+CREATE UNIQUE INDEX event_auth_chains_c_seq_index ON event_auth_chains USING btree (chain_id, sequence_number);
+CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false));
+CREATE UNIQUE INDEX event_edges_event_id_prev_event_id_idx ON event_edges USING btree (event_id, prev_event_id);
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry USING btree (expiry_ts);
+CREATE INDEX event_labels_room_id_label_idx ON event_labels USING btree (room_id, label, topological_ordering);
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1);
+CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering);
+CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id);
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id);
+CREATE INDEX event_push_actions_stream_highlight_index ON event_push_actions USING btree (highlight, stream_ordering) WHERE (highlight = 0);
+CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id);
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering);
+CREATE UNIQUE INDEX event_push_summary_unique_index ON event_push_summary USING btree (user_id, room_id);
+CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary USING btree (user_id, room_id, thread_id);
+CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id);
+CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key);
+CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id);
+CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id);
+CREATE INDEX event_search_fts_idx ON event_search USING gin (vector);
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group);
+CREATE UNIQUE INDEX event_txn_id_event_id ON event_txn_id USING btree (event_id);
+CREATE INDEX event_txn_id_ts ON event_txn_id USING btree (inserted_ts);
+CREATE UNIQUE INDEX event_txn_id_txn_id ON event_txn_id USING btree (room_id, user_id, token_id, txn_id);
+CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering);
+CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering);
+CREATE UNIQUE INDEX events_stream_ordering ON events USING btree (stream_ordering);
+CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering);
+CREATE UNIQUE INDEX federation_inbound_events_staging_instance_event ON federation_inbound_events_staging USING btree (origin, event_id);
+CREATE INDEX federation_inbound_events_staging_room ON federation_inbound_events_staging USING btree (room_id, received_ts);
+CREATE UNIQUE INDEX federation_stream_position_instance ON federation_stream_position USING btree (type, instance_name);
+CREATE INDEX ignored_users_ignored_user_id ON ignored_users USING btree (ignored_user_id);
+CREATE UNIQUE INDEX ignored_users_uniqueness ON ignored_users USING btree (ignorer_user_id, ignored_user_id);
+CREATE INDEX insertion_event_edges_event_id ON insertion_event_edges USING btree (event_id);
+CREATE INDEX insertion_event_edges_insertion_prev_event_id ON insertion_event_edges USING btree (insertion_prev_event_id);
+CREATE INDEX insertion_event_edges_insertion_room_id ON insertion_event_edges USING btree (room_id);
+CREATE UNIQUE INDEX insertion_event_extremities_event_id ON insertion_event_extremities USING btree (event_id);
+CREATE INDEX insertion_event_extremities_room_id ON insertion_event_extremities USING btree (room_id);
+CREATE UNIQUE INDEX insertion_events_event_id ON insertion_events USING btree (event_id);
+CREATE INDEX insertion_events_next_batch_id ON insertion_events USING btree (next_batch_id);
+CREATE UNIQUE INDEX instance_map_idx ON instance_map USING btree (instance_name);
+CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership USING btree (user_id, room_id);
+CREATE INDEX local_current_membership_room_idx ON local_current_membership USING btree (room_id);
+CREATE UNIQUE INDEX local_media_repository_thumbn_media_id_width_height_method_key ON local_media_repository_thumbnails USING btree (media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method);
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts);
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id);
+CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp");
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id);
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms);
+CREATE INDEX partial_state_events_room_id_idx ON partial_state_events USING btree (room_id);
+CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id);
+CREATE INDEX presence_stream_state_not_offline_idx ON presence_stream USING btree (state) WHERE (state <> 'offline'::text);
+CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+CREATE INDEX public_room_index ON rooms USING btree (is_public);
+CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name);
+CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id);
+CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id);
+CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name);
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id);
+CREATE UNIQUE INDEX receipts_graph_unique_index ON receipts_graph USING btree (room_id, receipt_type, user_id) WHERE (thread_id IS NULL);
+CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id);
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id);
+CREATE UNIQUE INDEX receipts_linearized_unique_index ON receipts_linearized USING btree (room_id, receipt_type, user_id) WHERE (thread_id IS NULL);
+CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id);
+CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts);
+CREATE INDEX redactions_have_censored_ts ON redactions USING btree (received_ts) WHERE (NOT have_censored);
+CREATE INDEX redactions_redacts ON redactions USING btree (redacts);
+CREATE INDEX refresh_tokens_next_token_id ON refresh_tokens USING btree (next_token_id) WHERE (next_token_id IS NOT NULL);
+CREATE UNIQUE INDEX remote_media_repository_thumbn_media_origin_id_width_height_met ON remote_media_cache_thumbnails USING btree (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method);
+CREATE INDEX room_account_data_stream_id ON room_account_data USING btree (user_id, stream_id);
+CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias);
+CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id);
+CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id);
+CREATE INDEX room_memberships_user_room_forgotten ON room_memberships USING btree (user_id, room_id) WHERE (forgotten = 1);
+CREATE INDEX room_retention_max_lifetime_idx ON room_retention USING btree (max_lifetime);
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id);
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state USING btree (room_id);
+CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering);
+CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering);
+CREATE UNIQUE INDEX stream_positions_idx ON stream_positions USING btree (stream_name, instance_name);
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address);
+CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token USING btree (session_id);
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp");
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp");
+CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id);
+CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector);
+CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id);
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id);
+CREATE INDEX user_external_ids_user_id_idx ON user_external_ids USING btree (user_id);
+CREATE UNIQUE INDEX user_filters_unique ON user_filters USING btree (user_id, filter_id);
+CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen);
+CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen);
+CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen);
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip);
+CREATE UNIQUE INDEX user_signature_stream_idx ON user_signature_stream USING btree (stream_id);
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server);
+CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address);
+CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id);
+CREATE INDEX users_creation_ts ON users USING btree (creation_ts);
+CREATE INDEX users_have_local_media ON local_media_repository USING btree (user_id, created_ts);
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms USING btree (room_id);
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id);
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id);
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id);
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id);
+CREATE UNIQUE INDEX worker_locks_key ON worker_locks USING btree (lock_name, lock_key);
+CREATE TRIGGER check_partial_state_events BEFORE INSERT OR UPDATE ON partial_state_events FOR EACH ROW EXECUTE PROCEDURE check_partial_state_events();
+ALTER TABLE ONLY access_tokens
+ ADD CONSTRAINT access_tokens_refresh_token_id_fkey FOREIGN KEY (refresh_token_id) REFERENCES refresh_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_destination_fkey FOREIGN KEY (destination) REFERENCES destinations(destination);
+ALTER TABLE ONLY destination_rooms
+ ADD CONSTRAINT destination_rooms_room_id_fkey FOREIGN KEY (room_id) REFERENCES rooms(room_id);
+ALTER TABLE ONLY event_edges
+ ADD CONSTRAINT event_edges_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id);
+ALTER TABLE ONLY event_txn_id
+ ADD CONSTRAINT event_txn_id_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id) ON DELETE CASCADE;
+ALTER TABLE ONLY event_txn_id
+ ADD CONSTRAINT event_txn_id_token_id_fkey FOREIGN KEY (token_id) REFERENCES access_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id);
+ALTER TABLE ONLY partial_state_events
+ ADD CONSTRAINT partial_state_events_room_id_fkey FOREIGN KEY (room_id) REFERENCES partial_state_rooms(room_id);
+ALTER TABLE ONLY partial_state_rooms
+ ADD CONSTRAINT partial_state_rooms_room_id_fkey FOREIGN KEY (room_id) REFERENCES rooms(room_id);
+ALTER TABLE ONLY partial_state_rooms_servers
+ ADD CONSTRAINT partial_state_rooms_servers_room_id_fkey FOREIGN KEY (room_id) REFERENCES partial_state_rooms(room_id);
+ALTER TABLE ONLY refresh_tokens
+ ADD CONSTRAINT refresh_tokens_next_token_id_fkey FOREIGN KEY (next_token_id) REFERENCES refresh_tokens(id) ON DELETE CASCADE;
+ALTER TABLE ONLY ui_auth_sessions_credentials
+ ADD CONSTRAINT ui_auth_sessions_credentials_session_id_fkey FOREIGN KEY (session_id) REFERENCES ui_auth_sessions(session_id);
+ALTER TABLE ONLY ui_auth_sessions_ips
+ ADD CONSTRAINT ui_auth_sessions_ips_session_id_fkey FOREIGN KEY (session_id) REFERENCES ui_auth_sessions(session_id);
+ALTER TABLE ONLY users_to_send_full_presence_to
+ ADD CONSTRAINT users_to_send_full_presence_to_user_id_fkey FOREIGN KEY (user_id) REFERENCES users(name);
+INSERT INTO appservice_stream_position VALUES ('X', 0);
+INSERT INTO event_push_summary_last_receipt_stream_id VALUES ('X', 0);
+INSERT INTO event_push_summary_stream_ordering VALUES ('X', 0);
+INSERT INTO federation_stream_position VALUES ('federation', -1, 'master');
+INSERT INTO federation_stream_position VALUES ('events', -1, 'master');
+INSERT INTO stats_incremental_position VALUES ('X', 1);
+INSERT INTO user_directory_stream_pos VALUES ('X', 1);
+SELECT pg_catalog.setval('account_data_sequence', 1, true);
+SELECT pg_catalog.setval('application_services_txn_id_seq', 1, false);
+SELECT pg_catalog.setval('cache_invalidation_stream_seq', 1, true);
+SELECT pg_catalog.setval('device_inbox_sequence', 1, true);
+SELECT pg_catalog.setval('event_auth_chain_id', 1, false);
+SELECT pg_catalog.setval('events_backfill_stream_seq', 1, true);
+SELECT pg_catalog.setval('events_stream_seq', 1, true);
+SELECT pg_catalog.setval('instance_map_instance_id_seq', 1, false);
+SELECT pg_catalog.setval('presence_stream_sequence', 1, true);
+SELECT pg_catalog.setval('receipts_sequence', 1, true);
+SELECT pg_catalog.setval('user_id_seq', 1, false);
diff --git a/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..d403baf1fb
--- /dev/null
+++ b/synapse/storage/schema/main/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,646 @@
+CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) );
+CREATE INDEX application_services_txns_id ON application_services_txns ( as_id );
+CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) );
+CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, deactivated SMALLINT DEFAULT 0 NOT NULL, shadow_banned BOOLEAN, consent_ts bigint, UNIQUE(name) );
+CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
+CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
+CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
+CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER , failure_ts BIGINT, last_successful_stream_ordering BIGINT);
+CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, instance_name TEXT, state_key TEXT DEFAULT NULL, rejection_reason TEXT DEFAULT NULL, UNIQUE (event_id) );
+CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering );
+CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) );
+CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) );
+CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, membership TEXT, UNIQUE (event_id), UNIQUE (room_id, type, state_key) );
+CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) );
+CREATE INDEX room_memberships_room_id ON room_memberships (room_id);
+CREATE INDEX room_memberships_user_id ON room_memberships (user_id);
+CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT , room_version TEXT, has_auth_chain_index BOOLEAN);
+CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) );
+CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_user_name on push_rules (user_name);
+CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) );
+CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name);
+CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id);
+CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id);
+CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) );
+CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id);
+CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id);
+CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
+CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
+CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, safe_from_quarantine BOOLEAN NOT NULL DEFAULT 0, UNIQUE (media_id) );
+CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) );
+CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, have_censored BOOL NOT NULL DEFAULT false, received_ts BIGINT, UNIQUE (event_id) );
+CREATE INDEX redactions_redacts ON redactions (redacts);
+CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) );
+CREATE INDEX room_aliases_id ON room_aliases(room_id);
+CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL );
+CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias);
+CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) );
+CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) );
+CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) );
+CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) );
+CREATE INDEX user_threepids_user_id ON user_threepids(user_id);
+CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value )
+/* event_search(event_id,room_id,sender,"key",value) */;
+CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) );
+CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, instance_name TEXT, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) );
+CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, instance_name TEXT, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) );
+CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, instance_name TEXT, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) );
+CREATE INDEX account_data_stream_id on account_data(user_id, stream_id);
+CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id);
+CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
+CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, unread SMALLINT, thread_id TEXT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) );
+CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id);
+CREATE INDEX events_room_stream on events(room_id, stream_ordering);
+CREATE INDEX public_room_index on rooms(is_public);
+CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering );
+CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN , instance_name TEXT);
+CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id);
+CREATE INDEX presence_stream_user_id ON presence_stream(user_id);
+CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT );
+CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id);
+CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id);
+CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL , instance_name TEXT);
+CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT );
+CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address);
+CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id );
+CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) );
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms);
+CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) );
+CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT );
+CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') );
+CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL , instance_name TEXT);
+CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id);
+CREATE INDEX received_transactions_ts ON received_transactions(ts);
+CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL , instance_name TEXT);
+CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id);
+CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL , instance_name TEXT);
+CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id);
+CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
+CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
+CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
+CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE INDEX evauth_edges_id ON event_auth(event_id);
+CREATE INDEX user_threepids_medium_address on user_threepids (medium, address);
+CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id );
+CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id);
+CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL , instance_name TEXT NOT NULL DEFAULT 'master');
+CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL );
+CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL );
+CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL );
+CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id);
+CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL , opentracing_context TEXT);
+CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id);
+CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id);
+CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL , unread_count BIGINT, last_receipt_stream_ordering BIGINT, thread_id TEXT);
+CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') );
+CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) );
+CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id);
+CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT );
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id);
+CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT , instance_name TEXT);
+CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id);
+CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') );
+CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value )
+/* user_directory_search(user_id,value) */;
+CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id);
+CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT );
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts);
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts);
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id);
+CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL );
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id);
+CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT );
+CREATE INDEX user_directory_room_idx ON user_directory(room_id);
+CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id);
+CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL , unread SMALLINT, thread_id TEXT);
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id);
+CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL );
+CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL , user_agent TEXT);
+CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
+CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
+CREATE TABLE erased_users ( user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id);
+CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL );
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id);
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL , etag BIGINT);
+CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version);
+CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL );
+CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
+CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
+CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);
+CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL );
+CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server );
+CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL );
+CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
+CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT , token_used_ts_ms BIGINT);
+CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT );
+CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id);
+CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key);
+CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL );
+CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id);
+CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen);
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering);
+CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id);
+CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id);
+CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen);
+CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
+CREATE INDEX users_creation_ts ON users (creation_ts);
+CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
+CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
+CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
+CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);
+CREATE TABLE threepid_validation_session (
+ session_id TEXT PRIMARY KEY,
+ medium TEXT NOT NULL,
+ address TEXT NOT NULL,
+ client_secret TEXT NOT NULL,
+ last_send_attempt BIGINT NOT NULL,
+ validated_at BIGINT
+);
+CREATE TABLE threepid_validation_token (
+ token TEXT PRIMARY KEY,
+ session_id TEXT NOT NULL,
+ next_link TEXT,
+ expires BIGINT NOT NULL
+);
+CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id);
+CREATE TABLE event_expiry (
+ event_id TEXT PRIMARY KEY,
+ expiry_ts BIGINT NOT NULL
+);
+CREATE INDEX event_expiry_expiry_ts_idx ON event_expiry(expiry_ts);
+CREATE TABLE event_labels (
+ event_id TEXT,
+ label TEXT,
+ room_id TEXT NOT NULL,
+ topological_ordering BIGINT NOT NULL,
+ PRIMARY KEY(event_id, label)
+);
+CREATE INDEX event_labels_room_id_label_idx ON event_labels(room_id, label, topological_ordering);
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys(user_id, version, room_id, session_id);
+CREATE TABLE IF NOT EXISTS "devices" (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ display_name TEXT,
+ last_seen BIGINT,
+ ip TEXT,
+ user_agent TEXT,
+ hidden BOOLEAN DEFAULT 0,
+ CONSTRAINT device_uniqueness UNIQUE (user_id, device_id)
+);
+CREATE TABLE room_retention(
+ room_id TEXT,
+ event_id TEXT,
+ min_lifetime BIGINT,
+ max_lifetime BIGINT,
+
+ PRIMARY KEY(room_id, event_id)
+);
+CREATE INDEX room_retention_max_lifetime_idx on room_retention(max_lifetime);
+CREATE TABLE e2e_cross_signing_keys (
+ user_id TEXT NOT NULL,
+ -- the type of cross-signing key (master, user_signing, or self_signing)
+ keytype TEXT NOT NULL,
+ -- the full key information, as a json-encoded dict
+ keydata TEXT NOT NULL,
+ -- for keeping the keys in order, so that we can fetch the latest one
+ stream_id BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX e2e_cross_signing_keys_idx ON e2e_cross_signing_keys(user_id, keytype, stream_id);
+CREATE TABLE e2e_cross_signing_signatures (
+ -- user who did the signing
+ user_id TEXT NOT NULL,
+ -- key used to sign
+ key_id TEXT NOT NULL,
+ -- user who was signed
+ target_user_id TEXT NOT NULL,
+ -- device/key that was signed
+ target_device_id TEXT NOT NULL,
+ -- the actual signature
+ signature TEXT NOT NULL
+);
+CREATE TABLE user_signature_stream (
+ -- uses the same stream ID as device list stream
+ stream_id BIGINT NOT NULL,
+ -- user who did the signing
+ from_user_id TEXT NOT NULL,
+ -- list of users who were signed, as a JSON array
+ user_ids TEXT NOT NULL
+);
+CREATE UNIQUE INDEX user_signature_stream_idx ON user_signature_stream(stream_id);
+CREATE INDEX e2e_cross_signing_signatures2_idx ON e2e_cross_signing_signatures(user_id, target_user_id, target_device_id);
+CREATE TABLE stats_incremental_position (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+CREATE TABLE room_stats_current (
+ room_id TEXT NOT NULL PRIMARY KEY,
+
+ -- These are absolute counts
+ current_state_events INT NOT NULL,
+ joined_members INT NOT NULL,
+ invited_members INT NOT NULL,
+ left_members INT NOT NULL,
+ banned_members INT NOT NULL,
+
+ local_users_in_room INT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+, knocked_members INT);
+CREATE TABLE user_stats_current (
+ user_id TEXT NOT NULL PRIMARY KEY,
+
+ joined_rooms BIGINT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+CREATE TABLE room_stats_state (
+ room_id TEXT NOT NULL,
+ name TEXT,
+ canonical_alias TEXT,
+ join_rules TEXT,
+ history_visibility TEXT,
+ encryption TEXT,
+ avatar TEXT,
+ guest_access TEXT,
+ is_federatable BOOLEAN,
+ topic TEXT
+, room_type TEXT);
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state(room_id);
+CREATE TABLE IF NOT EXISTS "user_filters" ( user_id TEXT NOT NULL, filter_id BIGINT NOT NULL, filter_json BYTEA NOT NULL );
+CREATE UNIQUE INDEX user_filters_unique ON "user_filters" (user_id, filter_id);
+CREATE TABLE user_external_ids (
+ auth_provider TEXT NOT NULL,
+ external_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ UNIQUE (auth_provider, external_id)
+);
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id);
+CREATE TABLE device_lists_remote_resync (
+ user_id TEXT NOT NULL,
+ added_ts BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync (added_ts);
+CREATE TABLE local_current_membership (
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ membership TEXT NOT NULL
+ );
+CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id);
+CREATE INDEX local_current_membership_room_idx ON local_current_membership(room_id);
+CREATE TABLE ui_auth_sessions(
+ session_id TEXT NOT NULL, -- The session ID passed to the client.
+ creation_time BIGINT NOT NULL, -- The time this session was created (epoch time in milliseconds).
+ serverdict TEXT NOT NULL, -- A JSON dictionary of arbitrary data added by Synapse.
+ clientdict TEXT NOT NULL, -- A JSON dictionary of arbitrary data from the client.
+ uri TEXT NOT NULL, -- The URI the UI authentication session is using.
+ method TEXT NOT NULL, -- The HTTP method the UI authentication session is using.
+    -- The clientdict, uri, and method make up a tuple that must be immutable
+ -- throughout the lifetime of the UI Auth session.
+    description TEXT NOT NULL,  -- A human-readable description of the operation which caused the UI Auth flow to occur.
+ UNIQUE (session_id)
+);
+CREATE TABLE ui_auth_sessions_credentials(
+ session_id TEXT NOT NULL, -- The corresponding UI Auth session.
+ stage_type TEXT NOT NULL, -- The stage type.
+ result TEXT NOT NULL, -- The result of the stage verification, stored as JSON.
+ UNIQUE (session_id, stage_type),
+ FOREIGN KEY (session_id)
+ REFERENCES ui_auth_sessions (session_id)
+);
+CREATE TABLE IF NOT EXISTS "device_lists_outbound_last_success" ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL );
+CREATE UNIQUE INDEX device_lists_outbound_last_success_unique_idx ON "device_lists_outbound_last_success" (destination, user_id);
+CREATE TABLE IF NOT EXISTS "local_media_repository_thumbnails" ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) );
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id);
+CREATE TABLE IF NOT EXISTS "remote_media_cache_thumbnails" ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) );
+CREATE TABLE ui_auth_sessions_ips(
+ session_id TEXT NOT NULL,
+ ip TEXT NOT NULL,
+ user_agent TEXT NOT NULL,
+ UNIQUE (session_id, ip, user_agent),
+ FOREIGN KEY (session_id)
+ REFERENCES ui_auth_sessions (session_id)
+);
+CREATE UNIQUE INDEX federation_stream_position_instance ON federation_stream_position(type, instance_name);
+CREATE TABLE dehydrated_devices(
+ user_id TEXT NOT NULL PRIMARY KEY,
+ device_id TEXT NOT NULL,
+ device_data TEXT NOT NULL -- JSON-encoded client-defined data
+);
+CREATE TABLE e2e_fallback_keys_json (
+ user_id TEXT NOT NULL, -- The user this fallback key is for.
+ device_id TEXT NOT NULL, -- The device this fallback key is for.
+ algorithm TEXT NOT NULL, -- Which algorithm this fallback key is for.
+ key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads.
+ key_json TEXT NOT NULL, -- The key as a JSON blob.
+ used BOOLEAN NOT NULL DEFAULT FALSE, -- Whether the key has been used or not.
+ CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm)
+);
+CREATE TABLE destination_rooms (
+ -- the destination in question.
+ destination TEXT NOT NULL REFERENCES destinations (destination),
+ -- the ID of the room in question
+ room_id TEXT NOT NULL REFERENCES rooms (room_id),
+ -- the stream_ordering of the event
+ stream_ordering BIGINT NOT NULL,
+ PRIMARY KEY (destination, room_id)
+ -- We don't declare a foreign key on stream_ordering here because that'd mean
+ -- we'd need to either maintain an index (expensive) or do a table scan of
+ -- destination_rooms whenever we delete an event (also potentially expensive).
+ -- In addition to that, a foreign key on stream_ordering would be redundant
+ -- as this row doesn't need to refer to a specific event; if the event gets
+ -- deleted then it doesn't affect the validity of the stream_ordering here.
+);
+CREATE INDEX destination_rooms_room_id
+ ON destination_rooms (room_id);
+CREATE TABLE stream_positions (
+ stream_name TEXT NOT NULL,
+ instance_name TEXT NOT NULL,
+ stream_id BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX stream_positions_idx ON stream_positions(stream_name, instance_name);
+CREATE TABLE IF NOT EXISTS "access_tokens" (
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT,
+ token TEXT NOT NULL,
+ valid_until_ms BIGINT,
+ puppets_user_id TEXT,
+ last_validated BIGINT, refresh_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE, used BOOLEAN,
+ UNIQUE(token)
+);
+CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id);
+CREATE TABLE IF NOT EXISTS "event_txn_id" (
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ token_id BIGINT NOT NULL,
+ txn_id TEXT NOT NULL,
+ inserted_ts BIGINT NOT NULL,
+ FOREIGN KEY (event_id)
+ REFERENCES events (event_id) ON DELETE CASCADE,
+ FOREIGN KEY (token_id)
+ REFERENCES access_tokens (id) ON DELETE CASCADE
+);
+CREATE UNIQUE INDEX event_txn_id_event_id ON event_txn_id(event_id);
+CREATE UNIQUE INDEX event_txn_id_txn_id ON event_txn_id(room_id, user_id, token_id, txn_id);
+CREATE INDEX event_txn_id_ts ON event_txn_id(inserted_ts);
+CREATE TABLE ignored_users( ignorer_user_id TEXT NOT NULL, ignored_user_id TEXT NOT NULL );
+CREATE UNIQUE INDEX ignored_users_uniqueness ON ignored_users (ignorer_user_id, ignored_user_id);
+CREATE INDEX ignored_users_ignored_user_id ON ignored_users (ignored_user_id);
+CREATE TABLE event_auth_chains (
+ event_id TEXT PRIMARY KEY,
+ chain_id BIGINT NOT NULL,
+ sequence_number BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX event_auth_chains_c_seq_index ON event_auth_chains (chain_id, sequence_number);
+CREATE TABLE event_auth_chain_links (
+ origin_chain_id BIGINT NOT NULL,
+ origin_sequence_number BIGINT NOT NULL,
+
+ target_chain_id BIGINT NOT NULL,
+ target_sequence_number BIGINT NOT NULL
+);
+CREATE INDEX event_auth_chain_links_idx ON event_auth_chain_links (origin_chain_id, target_chain_id);
+CREATE TABLE event_auth_chain_to_calculate (
+ event_id TEXT PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL
+);
+CREATE INDEX event_auth_chain_to_calculate_rm_id ON event_auth_chain_to_calculate(room_id);
+CREATE TABLE users_to_send_full_presence_to(
+ -- The user ID to send full presence to.
+ user_id TEXT PRIMARY KEY,
+ -- A presence stream ID token - the current presence stream token when the row was last upserted.
+ -- If a user calls /sync and this token is part of the update they're to receive, we also include
+ -- full user presence in the response.
+ -- This allows multiple devices for a user to receive full presence whenever they next call /sync.
+ presence_stream_id BIGINT,
+ FOREIGN KEY (user_id)
+ REFERENCES users (name)
+);
+CREATE TABLE refresh_tokens (
+ id BIGINT PRIMARY KEY,
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ token TEXT NOT NULL,
+ -- When consumed, a new refresh token is generated, which is tracked by
+ -- this foreign key
+ next_token_id BIGINT REFERENCES refresh_tokens (id) ON DELETE CASCADE, expiry_ts BIGINT DEFAULT NULL, ultimate_session_expiry_ts BIGINT DEFAULT NULL,
+ UNIQUE(token)
+);
+CREATE TABLE worker_locks (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+    -- We write the instance name to ease manual debugging; we don't ever read
+ -- from it.
+    -- Note: instance names aren't guaranteed to be unique.
+ instance_name TEXT NOT NULL,
+ -- A random string generated each time an instance takes out a lock. Used by
+ -- the instance to tell whether the lock is still held by it (e.g. in the
+ -- case where the process stalls for a long time the lock may time out and
+ -- be taken out by another instance, at which point the original instance
+ -- can tell it no longer holds the lock as the tokens no longer match).
+ token TEXT NOT NULL,
+ last_renewed_ts BIGINT NOT NULL
+);
+CREATE UNIQUE INDEX worker_locks_key ON worker_locks (lock_name, lock_key);
+CREATE TABLE federation_inbound_events_staging (
+ origin TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ received_ts BIGINT NOT NULL,
+ event_json TEXT NOT NULL,
+ internal_metadata TEXT NOT NULL
+);
+CREATE INDEX federation_inbound_events_staging_room ON federation_inbound_events_staging(room_id, received_ts);
+CREATE UNIQUE INDEX federation_inbound_events_staging_instance_event ON federation_inbound_events_staging(origin, event_id);
+CREATE TABLE insertion_event_edges(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ insertion_prev_event_id TEXT NOT NULL
+);
+CREATE INDEX insertion_event_edges_insertion_room_id ON insertion_event_edges(room_id);
+CREATE INDEX insertion_event_edges_insertion_prev_event_id ON insertion_event_edges(insertion_prev_event_id);
+CREATE TABLE insertion_event_extremities(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX insertion_event_extremities_event_id ON insertion_event_extremities(event_id);
+CREATE INDEX insertion_event_extremities_room_id ON insertion_event_extremities(room_id);
+CREATE TABLE registration_tokens(
+ token TEXT NOT NULL, -- The token that can be used for authentication.
+ uses_allowed INT, -- The total number of times this token can be used. NULL if no limit.
+    pending INT NOT NULL, -- The number of in-progress registrations using this token.
+ completed INT NOT NULL, -- The number of times this token has been used to complete a registration.
+ expiry_time BIGINT, -- The latest time this token will be valid (epoch time in milliseconds). NULL if token doesn't expire.
+ UNIQUE (token)
+);
+CREATE TABLE sessions(
+ session_type TEXT NOT NULL, -- The unique key for this type of session.
+ session_id TEXT NOT NULL, -- The session ID passed to the client.
+ value TEXT NOT NULL, -- A JSON dictionary to persist.
+ expiry_time_ms BIGINT NOT NULL, -- The time this session will expire (epoch time in milliseconds).
+ UNIQUE (session_type, session_id)
+);
+CREATE TABLE insertion_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ next_batch_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX insertion_events_event_id ON insertion_events(event_id);
+CREATE INDEX insertion_events_next_batch_id ON insertion_events(next_batch_id);
+CREATE TABLE batch_events(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ batch_id TEXT NOT NULL
+);
+CREATE UNIQUE INDEX batch_events_event_id ON batch_events(event_id);
+CREATE INDEX batch_events_batch_id ON batch_events(batch_id);
+CREATE INDEX insertion_event_edges_event_id ON insertion_event_edges(event_id);
+CREATE TABLE device_auth_providers (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ auth_provider_id TEXT NOT NULL,
+ auth_provider_session_id TEXT NOT NULL
+);
+CREATE INDEX device_auth_providers_devices
+ ON device_auth_providers (user_id, device_id);
+CREATE INDEX device_auth_providers_sessions
+ ON device_auth_providers (auth_provider_id, auth_provider_session_id);
+CREATE INDEX refresh_tokens_next_token_id
+ ON refresh_tokens(next_token_id)
+ WHERE next_token_id IS NOT NULL;
+CREATE TABLE partial_state_rooms (
+ room_id TEXT PRIMARY KEY,
+ FOREIGN KEY(room_id) REFERENCES rooms(room_id)
+);
+CREATE TABLE partial_state_rooms_servers (
+ room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id),
+ server_name TEXT NOT NULL,
+ UNIQUE(room_id, server_name)
+);
+CREATE TABLE partial_state_events (
+ -- the room_id is denormalised for efficient indexing (the canonical source is `events`)
+ room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id),
+ event_id TEXT NOT NULL REFERENCES events(event_id),
+ UNIQUE(event_id)
+);
+CREATE INDEX partial_state_events_room_id_idx
+ ON partial_state_events (room_id);
+CREATE TRIGGER partial_state_events_bad_room_id
+ BEFORE INSERT ON partial_state_events
+ FOR EACH ROW
+ BEGIN
+ SELECT RAISE(ABORT, 'Incorrect room_id in partial_state_events')
+ WHERE EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.room_id != NEW.room_id
+ );
+ END;
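+-- The trigger above guards the denormalised room_id: an insert whose room_id
+-- disagrees with the row already stored in `events` is aborted.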
+CREATE TABLE device_lists_changes_in_room (
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+
+ -- This initially matches `device_lists_stream.stream_id`. Note that we
+ -- delete older values from `device_lists_stream`, so we can't use a foreign
+ -- constraint here.
+ --
+ -- The table will contain rows with the same `stream_id` but different
+ -- `room_id`, as for each device update we store a row per room the user is
+ -- joined to. Therefore `(stream_id, room_id)` gives a unique index.
+ stream_id BIGINT NOT NULL,
+
+ -- We have a background process which goes through this table and converts
+ -- entries into rows in `device_lists_outbound_pokes`. Once we have processed
+ -- a row, we mark it as such by setting `converted_to_destinations=TRUE`.
+ converted_to_destinations BOOLEAN NOT NULL,
+ opentracing_context TEXT
+);
+CREATE UNIQUE INDEX device_lists_changes_in_stream_id ON device_lists_changes_in_room(stream_id, room_id);
+CREATE INDEX device_lists_changes_in_stream_id_unconverted ON device_lists_changes_in_room(stream_id) WHERE NOT converted_to_destinations;
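+-- The partial index above lets the background process cheaply find rows that have
+-- not yet been converted into `device_lists_outbound_pokes` entries.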
+CREATE TABLE IF NOT EXISTS "event_edges" (
+ event_id TEXT NOT NULL,
+ prev_event_id TEXT NOT NULL,
+ room_id TEXT NULL,
+ is_state BOOL NOT NULL DEFAULT 0,
+ FOREIGN KEY(event_id) REFERENCES events(event_id)
+);
+CREATE UNIQUE INDEX event_edges_event_id_prev_event_id_idx
+ ON event_edges (event_id, prev_event_id);
+CREATE INDEX ev_edges_prev_id ON event_edges (prev_event_id);
+CREATE TABLE event_push_summary_last_receipt_stream_id (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+CREATE TABLE IF NOT EXISTS "application_services_state" (
+ as_id TEXT PRIMARY KEY NOT NULL,
+ state VARCHAR(5),
+ read_receipt_stream_id BIGINT,
+ presence_stream_id BIGINT,
+ to_device_stream_id BIGINT,
+ device_list_stream_id BIGINT
+);
+CREATE TABLE IF NOT EXISTS "receipts_linearized" (
+ stream_id BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ thread_id TEXT,
+ event_stream_ordering BIGINT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_linearized_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+CREATE TABLE IF NOT EXISTS "receipts_graph" (
+ room_id TEXT NOT NULL,
+ receipt_type TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ event_ids TEXT NOT NULL,
+ thread_id TEXT,
+ data TEXT NOT NULL,
+ CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id),
+ CONSTRAINT receipts_graph_uniqueness_thread UNIQUE (room_id, receipt_type, user_id, thread_id)
+);
+CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id );
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id );
+CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id );
+CREATE INDEX redactions_have_censored_ts ON redactions (received_ts) WHERE NOT have_censored;
+CREATE INDEX room_memberships_user_room_forgotten ON room_memberships (user_id, room_id) WHERE forgotten = 1;
+CREATE INDEX users_have_local_media ON local_media_repository (user_id, created_ts) ;
+CREATE UNIQUE INDEX e2e_cross_signing_keys_stream_idx ON e2e_cross_signing_keys (stream_id) ;
+CREATE INDEX user_external_ids_user_id_idx ON user_external_ids (user_id) ;
+CREATE INDEX presence_stream_state_not_offline_idx ON presence_stream (state) WHERE state != 'offline';
+CREATE UNIQUE INDEX event_push_summary_unique_index ON event_push_summary (user_id, room_id) ;
+CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ;
+CREATE UNIQUE INDEX receipts_graph_unique_index ON receipts_graph (room_id, receipt_type, user_id) WHERE thread_id IS NULL;
+CREATE UNIQUE INDEX receipts_linearized_unique_index ON receipts_linearized (room_id, receipt_type, user_id) WHERE thread_id IS NULL;
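+-- The two partial indexes above complement the *_uniqueness_thread constraints:
+-- UNIQUE constraints treat NULLs as distinct, so without them multiple unthreaded
+-- receipts could exist for the same (room_id, receipt_type, user_id).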
+CREATE INDEX event_push_actions_stream_highlight_index ON event_push_actions (highlight, stream_ordering) WHERE highlight=0;
+CREATE INDEX current_state_events_member_index ON current_state_events (state_key) WHERE type='m.room.member';
+CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering) WHERE highlight=1;
+CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts) WHERE url_cache IS NOT NULL;
+INSERT INTO appservice_stream_position VALUES('X',0);
+INSERT INTO federation_stream_position VALUES('federation',-1,'master');
+INSERT INTO federation_stream_position VALUES('events',-1,'master');
+INSERT INTO event_push_summary_stream_ordering VALUES('X',0);
+INSERT INTO user_directory_stream_pos VALUES('X',1);
+INSERT INTO stats_incremental_position VALUES('X',1);
+INSERT INTO event_push_summary_last_receipt_stream_id VALUES('X',0);
diff --git a/synapse/storage/schema/state/full_schemas/72/full.sql.postgres b/synapse/storage/schema/state/full_schemas/72/full.sql.postgres
new file mode 100644
index 0000000000..263ade761e
--- /dev/null
+++ b/synapse/storage/schema/state/full_schemas/72/full.sql.postgres
@@ -0,0 +1,30 @@
+CREATE TABLE state_group_edges (
+ state_group bigint NOT NULL,
+ prev_state_group bigint NOT NULL
+);
+CREATE SEQUENCE state_group_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+CREATE TABLE state_groups (
+ id bigint NOT NULL,
+ room_id text NOT NULL,
+ event_id text NOT NULL
+);
+CREATE TABLE state_groups_state (
+ state_group bigint NOT NULL,
+ room_id text NOT NULL,
+ type text NOT NULL,
+ state_key text NOT NULL,
+ event_id text NOT NULL
+);
+ALTER TABLE ONLY state_groups_state ALTER COLUMN state_group SET (n_distinct=-0.02);
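+-- A negative n_distinct is read by the planner as a fraction of the row count:
+-- -0.02 declares that distinct state_group values amount to roughly 2% of the
+-- rows in state_groups_state.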
+ALTER TABLE ONLY state_groups
+ ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group);
+CREATE UNIQUE INDEX state_group_edges_unique_idx ON state_group_edges USING btree (state_group, prev_state_group);
+CREATE INDEX state_groups_room_id_idx ON state_groups USING btree (room_id);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key);
+SELECT pg_catalog.setval('state_group_id_seq', 1, false);
diff --git a/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite b/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite
new file mode 100644
index 0000000000..dda060b638
--- /dev/null
+++ b/synapse/storage/schema/state/full_schemas/72/full.sql.sqlite
@@ -0,0 +1,20 @@
+CREATE TABLE state_groups (
+ id BIGINT PRIMARY KEY,
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+CREATE TABLE state_groups_state (
+ state_group BIGINT NOT NULL,
+ room_id TEXT NOT NULL,
+ type TEXT NOT NULL,
+ state_key TEXT NOT NULL,
+ event_id TEXT NOT NULL
+);
+CREATE TABLE state_group_edges (
+ state_group BIGINT NOT NULL,
+ prev_state_group BIGINT NOT NULL
+);
+CREATE INDEX state_group_edges_prev_idx ON state_group_edges (prev_state_group);
+CREATE INDEX state_groups_state_type_idx ON state_groups_state (state_group, type, state_key);
+CREATE INDEX state_groups_room_id_idx ON state_groups (room_id) ;
+CREATE UNIQUE INDEX state_group_edges_unique_idx ON state_group_edges (state_group, prev_state_group) ;
diff --git a/synapse/types.py b/synapse/types.py
index ec44601f54..773f0438d5 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -835,6 +835,7 @@ class ReadReceipt:
receipt_type: str
user_id: str
event_ids: List[str]
+ thread_id: Optional[str]
data: JsonDict
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 10aff4d04a..3909f1caea 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -53,7 +53,7 @@ CacheKey = Union[Tuple, Any]
F = TypeVar("F", bound=Callable[..., Any])
-class _CachedFunction(Generic[F]):
+class CachedFunction(Generic[F]):
invalidate: Any = None
invalidate_all: Any = None
prefill: Any = None
@@ -242,7 +242,7 @@ class LruCacheDescriptor(_CacheDescriptorBase):
return ret2
- wrapped = cast(_CachedFunction, _wrapped)
+ wrapped = cast(CachedFunction, _wrapped)
wrapped.cache = cache
obj.__dict__[self.name] = wrapped
@@ -363,7 +363,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
return make_deferred_yieldable(ret)
- wrapped = cast(_CachedFunction, _wrapped)
+ wrapped = cast(CachedFunction, _wrapped)
if self.num_args == 1:
assert not self.tree
@@ -572,7 +572,7 @@ def cached(
iterable: bool = False,
prune_unread_entries: bool = True,
name: Optional[str] = None,
-) -> Callable[[F], _CachedFunction[F]]:
+) -> Callable[[F], CachedFunction[F]]:
func = lambda orig: DeferredCacheDescriptor(
orig,
max_entries=max_entries,
@@ -585,7 +585,7 @@ def cached(
name=name,
)
- return cast(Callable[[F], _CachedFunction[F]], func)
+ return cast(Callable[[F], CachedFunction[F]], func)
def cachedList(
@@ -594,7 +594,7 @@ def cachedList(
list_name: str,
num_args: Optional[int] = None,
name: Optional[str] = None,
-) -> Callable[[F], _CachedFunction[F]]:
+) -> Callable[[F], CachedFunction[F]]:
"""Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`.
Used to do batch lookups for an already created cache. One of the arguments
@@ -631,7 +631,7 @@ def cachedList(
name=name,
)
- return cast(Callable[[F], _CachedFunction[F]], func)
+ return cast(Callable[[F], CachedFunction[F]], func)
def _get_cache_key_builder(
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index a5aa500ef8..f1e357764f 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -49,7 +49,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["event_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
@@ -89,7 +94,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["event_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
@@ -121,7 +131,12 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
# send the second RR
receipt = ReadReceipt(
- "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
+ "room_id",
+ "m.read",
+ "user_id",
+ ["other_id"],
+ thread_id=None,
+ data={"ts": 1234},
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index b17af2725b..af24c4984d 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -447,6 +447,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
receipt_type="m.read",
user_id=self.local_user,
event_ids=[f"$eventid_{i}"],
+ thread_id=None,
data={},
)
)
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 8adba29d7f..9c821b3042 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -129,7 +129,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
async def check_host_in_room(room_id: str, server_name: str) -> bool:
return room_id == ROOM_ID
- hs.get_event_auth_handler().check_host_in_room = check_host_in_room
+ hs.get_event_auth_handler().is_host_in_room = check_host_in_room
async def get_current_hosts_in_room(room_id: str):
return {member.domain for member in self.room_members}
@@ -138,6 +138,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
get_current_hosts_in_room
)
+ hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = (
+ get_current_hosts_in_room
+ )
+
async def get_users_in_room(room_id: str):
return {str(u) for u in self.room_members}
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 7a3b0d6755..fd14568f55 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -114,7 +114,7 @@ class EmailPusherTests(HomeserverTestCase):
)
self.pusher = self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
access_token=self.token_id,
kind="email",
@@ -136,7 +136,7 @@ class EmailPusherTests(HomeserverTestCase):
"""
with self.assertRaises(SynapseError) as cm:
self.get_success_or_raise(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
access_token=self.token_id,
kind="email",
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index d9c68cdd2d..b383b8401f 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -19,9 +19,10 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.logging.context import make_deferred_yieldable
-from synapse.push import PusherConfigException
-from synapse.rest.client import login, push_rule, receipts, room
+from synapse.push import PusherConfig, PusherConfigException
+from synapse.rest.client import login, push_rule, pusher, receipts, room
from synapse.server import HomeServer
+from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import JsonDict
from synapse.util import Clock
@@ -35,6 +36,7 @@ class HTTPPusherTests(HomeserverTestCase):
login.register_servlets,
receipts.register_servlets,
push_rule.register_servlets,
+ pusher.register_servlets,
]
user_id = True
hijack_auth = False
@@ -74,7 +76,7 @@ class HTTPPusherTests(HomeserverTestCase):
def test_data(data: Optional[JsonDict]) -> None:
self.get_failure(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -119,7 +121,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -235,7 +237,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -355,7 +357,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -441,7 +443,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -518,7 +520,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -624,7 +626,7 @@ class HTTPPusherTests(HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -728,18 +730,38 @@ class HTTPPusherTests(HomeserverTestCase):
)
self.assertEqual(channel.code, 200, channel.json_body)
- def _make_user_with_pusher(self, username: str) -> Tuple[str, str]:
+ def _make_user_with_pusher(
+ self, username: str, enabled: bool = True
+ ) -> Tuple[str, str]:
+ """Registers a user and creates a pusher for them.
+
+ Args:
+ username: the localpart of the new user's Matrix ID.
+ enabled: whether to create the pusher in an enabled or disabled state.
+ """
user_id = self.register_user(username, "pass")
access_token = self.login(username, "pass")
# Register the pusher
+ self._set_pusher(user_id, access_token, enabled)
+
+ return user_id, access_token
+
+ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None:
+ """Creates or updates the pusher for the given user.
+
+ Args:
+ user_id: the user's Matrix ID.
+ access_token: the access token associated with the pusher.
+ enabled: whether to enable or disable the pusher.
+ """
user_tuple = self.get_success(
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
@@ -749,11 +771,11 @@ class HTTPPusherTests(HomeserverTestCase):
pushkey="a@example.com",
lang=None,
data={"url": "http://example.com/_matrix/push/v1/notify"},
+ enabled=enabled,
+ device_id=user_tuple.device_id,
)
)
- return user_id, access_token
-
def test_dont_notify_rule_overrides_message(self) -> None:
"""
The override push rule will suppress notification
@@ -791,3 +813,148 @@ class HTTPPusherTests(HomeserverTestCase):
# The user sends a message back (sends a notification)
self.helper.send(room, body="Hello", tok=access_token)
self.assertEqual(len(self.push_attempts), 1)
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_disable(self) -> None:
+ """Tests that disabling a pusher means it's not pushed to anymore."""
+ user_id, access_token = self._make_user_with_pusher("user")
+ other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+ room = self.helper.create_room_as(user_id, tok=access_token)
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # Send a message and check that it generated a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Disable the pusher.
+ self._set_pusher(user_id, access_token, enabled=False)
+
+ # Send another message and check that it did not generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Get the pushers for the user and check that it is marked as disabled.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+ enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+ self.assertFalse(enabled)
+ self.assertTrue(isinstance(enabled, bool))
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_enable(self) -> None:
+ """Tests that enabling a disabled pusher means it gets pushed to."""
+ # Create the user with the pusher already disabled.
+ user_id, access_token = self._make_user_with_pusher("user", enabled=False)
+ other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+ room = self.helper.create_room_as(user_id, tok=access_token)
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # Send a message and check that it did not generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 0)
+
+ # Enable the pusher.
+ self._set_pusher(user_id, access_token, enabled=True)
+
+ # Send another message and check that it did generate a push.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ # Get the pushers for the user and check that it is marked as enabled.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+ enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+ self.assertTrue(enabled)
+ self.assertTrue(isinstance(enabled, bool))
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_null_enabled(self) -> None:
+ """Tests that a pusher that has an 'enabled' column set to NULL (eg pushers
+ created before the column was introduced) is considered enabled.
+ """
+ # We intentionally set 'enabled' to None so that it's stored as NULL in the
+ # database.
+ user_id, access_token = self._make_user_with_pusher("user", enabled=None) # type: ignore[arg-type]
+
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+ self.assertTrue(channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"])
+
+ def test_update_different_device_access_token_device_id(self) -> None:
+ """Tests that if we create a pusher from one device, the update it from another
+ device, the access token and device ID associated with the pusher stays the
+ same.
+ """
+ # Create a user with a pusher.
+ user_id, access_token = self._make_user_with_pusher("user")
+
+ # Get the token ID for the current access token, since that's what we store in
+ # the pushers table. Also get the device ID from it.
+ user_tuple = self.get_success(
+ self.hs.get_datastores().main.get_user_by_access_token(access_token)
+ )
+ token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
+
+ # Generate a new access token, and update the pusher with it.
+ new_token = self.login("user", "pass")
+ self._set_pusher(user_id, new_token, enabled=False)
+
+ # Get the current list of pushers for the user.
+ ret = self.get_success(
+ self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
+ )
+ pushers: List[PusherConfig] = list(ret)
+
+ # Check that we still have one pusher, and that the access token and device ID
+ # associated with it didn't change.
+ self.assertEqual(len(pushers), 1)
+ self.assertEqual(pushers[0].access_token, token_id)
+ self.assertEqual(pushers[0].device_id, device_id)
+
+ @override_config({"experimental_features": {"msc3881_enabled": True}})
+ def test_device_id(self) -> None:
+ """Tests that a pusher created with a given device ID shows that device ID in
+ GET /pushers requests.
+ """
+ self.register_user("user", "pass")
+ access_token = self.login("user", "pass")
+
+ # We create the pusher with an HTTP request rather than with
+ # _make_user_with_pusher so that we can test that the device ID is correctly
+ # set when creating a pusher via an API call.
+ self.make_request(
+ method="POST",
+ path="/pushers/set",
+ content={
+ "kind": "http",
+ "app_id": "m.http",
+ "app_display_name": "HTTP Push Notifications",
+ "device_display_name": "pushy push",
+ "pushkey": "a@example.com",
+ "lang": "en",
+ "data": {"url": "http://example.com/_matrix/push/v1/notify"},
+ },
+ access_token=access_token,
+ )
+
+ # Look up the user info for the access token so we can compare the device ID.
+ lookup_result: TokenLookupResult = self.get_success(
+ self.hs.get_datastores().main.get_user_by_access_token(access_token)
+ )
+
+ # Get the user's pushers and check the pusher has the correct device ID.
+ channel = self.make_request("GET", "/pushers", access_token=access_token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(len(channel.json_body["pushers"]), 1)
+ self.assertEqual(
+ channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"],
+ lookup_result.device_id,
+ )
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 49a21e2e85..efd92793c0 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -171,7 +171,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
if send_receipt:
self.get_success(
self.master_store.insert_receipt(
- ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], {}
+ ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], None, {}
)
)
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index eb00117845..ede6d0c118 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -33,7 +33,12 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
# tell the master to send a new receipt
self.get_success(
self.hs.get_datastores().main.insert_receipt(
- "!room:blue", "m.read", USER_ID, ["$event:blue"], {"a": 1}
+ "!room:blue",
+ "m.read",
+ USER_ID,
+ ["$event:blue"],
+ thread_id=None,
+ data={"a": 1},
)
)
self.replicate()
@@ -48,6 +53,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
self.assertEqual("m.read", row.receipt_type)
self.assertEqual(USER_ID, row.user_id)
self.assertEqual("$event:blue", row.event_id)
+ self.assertIsNone(row.thread_id)
self.assertEqual({"a": 1}, row.data)
# Now let's disconnect and insert some data.
@@ -57,7 +63,12 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
self.get_success(
self.hs.get_datastores().main.insert_receipt(
- "!room2:blue", "m.read", USER_ID, ["$event2:foo"], {"a": 2}
+ "!room2:blue",
+ "m.read",
+ USER_ID,
+ ["$event2:foo"],
+ thread_id=None,
+ data={"a": 2},
)
)
self.replicate()
diff --git a/tests/replication/test_module_cache_invalidation.py b/tests/replication/test_module_cache_invalidation.py
new file mode 100644
index 0000000000..b93cae67d3
--- /dev/null
+++ b/tests/replication/test_module_cache_invalidation.py
@@ -0,0 +1,79 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import synapse
+from synapse.module_api import cached
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+logger = logging.getLogger(__name__)
+
+FIRST_VALUE = "one"
+SECOND_VALUE = "two"
+
+KEY = "mykey"
+
+
+class TestCache:
+ current_value = FIRST_VALUE
+
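+ # `cached_function` is memoised per key by the `cached` decorator, so changing
+ # `current_value` alone does not change what the function returns until the
+ # cache entry is invalidated (which is what the test below exercises).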
+ @cached()
+ async def cached_function(self, user_id: str) -> str:
+ return self.current_value
+
+
+class ModuleCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ ]
+
+ def test_module_cache_full_invalidation(self):
+ main_cache = TestCache()
+ self.hs.get_module_api().register_cached_function(main_cache.cached_function)
+
+ worker_hs = self.make_worker_hs("synapse.app.generic_worker")
+
+ worker_cache = TestCache()
+ worker_hs.get_module_api().register_cached_function(
+ worker_cache.cached_function
+ )
+
+ self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+ self.assertEqual(
+ FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
+
+ main_cache.current_value = SECOND_VALUE
+ worker_cache.current_value = SECOND_VALUE
+ # No invalidation yet, should return the cached value on both the main process and the worker
+ self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+ self.assertEqual(
+ FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
+
+ # Full invalidation on the main process should be replicated to the worker,
+ # which should then return the updated value too.
+ self.get_success(
+ self.hs.get_module_api().invalidate_cache(
+ main_cache.cached_function, (KEY,)
+ )
+ )
+
+ self.assertEqual(
+ SECOND_VALUE, self.get_success(main_cache.cached_function(KEY))
+ )
+ self.assertEqual(
+ SECOND_VALUE, self.get_success(worker_cache.cached_function(KEY))
+ )
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 8f4f6688ce..59fea93e49 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -55,7 +55,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
token_id = user_dict.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="http",
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 9f536ceeb3..1847e6ad6b 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -2839,7 +2839,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
token_id = user_tuple.token_id
self.get_success(
- self.hs.get_pusherpool().add_pusher(
+ self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.other_user,
access_token=token_id,
kind="http",
diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py
new file mode 100644
index 0000000000..d5bb16c98d
--- /dev/null
+++ b/tests/rest/client/test_login_token_request.py
@@ -0,0 +1,132 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, login_token_request
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import override_config
+
+
+class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ login.register_servlets,
+ admin.register_servlets,
+ login_token_request.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.hs = self.setup_test_homeserver()
+ self.hs.config.registration.enable_registration = True
+ self.hs.config.registration.registrations_require_3pid = []
+ self.hs.config.registration.auto_join_rooms = []
+ self.hs.config.captcha.enable_registration_captcha = False
+
+ return self.hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.user = "user123"
+ self.password = "password"
+
+ def test_disabled(self) -> None:
+ channel = self.make_request("POST", "/login/token", {}, access_token=None)
+ self.assertEqual(channel.code, 400)
+
+ self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", "/login/token", {}, access_token=token)
+ self.assertEqual(channel.code, 400)
+
+ @override_config({"experimental_features": {"msc3882_enabled": True}})
+ def test_require_auth(self) -> None:
+ channel = self.make_request("POST", "/login/token", {}, access_token=None)
+ self.assertEqual(channel.code, 401)
+
+ @override_config({"experimental_features": {"msc3882_enabled": True}})
+ def test_uia_on(self) -> None:
+ user_id = self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", "/login/token", {}, access_token=token)
+ self.assertEqual(channel.code, 401)
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
+
+ session = channel.json_body["session"]
+
+ uia = {
+ "auth": {
+ "type": "m.login.password",
+ "identifier": {"type": "m.id.user", "user": self.user},
+ "password": self.password,
+ "session": session,
+ },
+ }
+
+ channel = self.make_request("POST", "/login/token", uia, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in"], 300)
+
+ login_token = channel.json_body["login_token"]
+
+ channel = self.make_request(
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["user_id"], user_id)
+
+ @override_config(
+ {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}}
+ )
+ def test_uia_off(self) -> None:
+ user_id = self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", "/login/token", {}, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in"], 300)
+
+ login_token = channel.json_body["login_token"]
+
+ channel = self.make_request(
+ "POST",
+ "/login",
+ content={"type": "m.login.token", "token": login_token},
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(channel.json_body["user_id"], user_id)
+
+ @override_config(
+ {
+ "experimental_features": {
+ "msc3882_enabled": True,
+ "msc3882_ui_auth": False,
+ "msc3882_token_timeout": "15s",
+ }
+ }
+ )
+ def test_expires_in(self) -> None:
+ self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ channel = self.make_request("POST", "/login/token", {}, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in"], 15)
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 651f4f415d..d33e34d829 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -788,6 +788,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
channel.json_body["chunk"][0],
)
+ @unittest.override_config({"experimental_features": {"msc3715_enabled": True}})
def test_repeated_paginate_relations(self) -> None:
"""Test that if we paginate using a limit and tokens then we get the
expected events.
@@ -809,7 +810,7 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
channel = self.make_request(
"GET",
- f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=3{from_token}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
@@ -827,6 +828,32 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)
+ # Test forward pagination.
+ prev_token = ""
+ found_event_ids = []
+ for _ in range(20):
+ from_token = ""
+ if prev_token:
+ from_token = "&from=" + prev_token
+
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?org.matrix.msc3715.dir=f&limit=3{from_token}",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+
+ found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
+ next_batch = channel.json_body.get("next_batch")
+
+ self.assertNotEqual(prev_token, next_batch)
+ prev_token = next_batch
+
+ if not prev_token:
+ break
+
+ self.assertEqual(found_event_ids, expected_event_ids)
+
def test_pagination_from_sync_and_messages(self) -> None:
"""Pagination tokens from /sync and /messages can be used to paginate /relations."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index a6679e1312..85739c464e 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -12,25 +12,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Tuple, Union
+import datetime
+from typing import Dict, List, Tuple, Union
import attr
from parameterized import parameterized
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
)
from synapse.events import _EventInternalMetadata
-from synapse.util import json_encoder
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.types import JsonDict
+from synapse.util import Clock, json_encoder
import tests.unittest
import tests.utils
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class _BackfillSetupInfo:
+ room_id: str
+ depth_map: Dict[str, int]
+
+
class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
def test_get_prev_events_for_room(self):
@@ -571,11 +584,471 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
self.assertEqual(count, 1)
- _, event_id = self.get_success(
+ next_staged_event_info = self.get_success(
self.store.get_next_staged_event_id_for_room(room_id)
)
+ assert next_staged_event_info
+ _, event_id = next_staged_event_info
self.assertEqual(event_id, "$fake_event_id_500")
+ def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various events and backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ # The silly graph we use to test grabbing backward extremities,
+ # where the events at the top are the oldest.
+ # 1 (oldest)
+ # |
+ # 2 ⹁
+ # | \
+ # | [b1, b2, b3]
+ # | |
+ # | A
+ # | /
+ # 3 {
+ # | \
+ # | [b4, b5, b6]
+ # | |
+ # | B
+ # | /
+ # 4 ´
+ # |
+ # 5 (newest)
+
+ event_graph: Dict[str, List[str]] = {
+ "1": [],
+ "2": ["1"],
+ "3": ["2", "A"],
+ "4": ["3", "B"],
+ "5": ["4"],
+ "A": ["b1", "b2", "b3"],
+ "b1": ["2"],
+ "b2": ["2"],
+ "b3": ["2"],
+ "B": ["b4", "b5", "b6"],
+ "b4": ["3"],
+ "b5": ["3"],
+ "b6": ["3"],
+ }
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "b1": 3,
+ "b2": 3,
+ "b3": 3,
+ "A": 4,
+ "3": 5,
+ "b4": 6,
+ "b5": 6,
+ "b6": 6,
+ "B": 7,
+ "4": 8,
+ "5": 9,
+ }
+
+ # The events we have persisted on our server.
+ # The rest are events in the room but not backfilled yet.
+ our_server_events = {"5", "4", "B", "3", "A"}
+
+ complete_event_dict_map: Dict[str, JsonDict] = {}
+ stream_ordering = 0
+ for (event_id, prev_event_ids) in event_graph.items():
+ depth = depth_map[event_id]
+
+ complete_event_dict_map[event_id] = {
+ "event_id": event_id,
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": "@sender",
+ "prev_event_ids": prev_event_ids,
+ "auth_event_ids": [],
+ "origin_server_ts": stream_ordering,
+ "depth": depth,
+ "stream_ordering": stream_ordering,
+ "content": {"body": "event" + event_id},
+ }
+
+ stream_ordering += 1
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ for event_id in our_server_events:
+ event_dict = complete_event_dict_map[event_id]
+
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_dict.get("event_id"),
+ "type": event_dict.get("type"),
+ "room_id": event_dict.get("room_id"),
+ "depth": event_dict.get("depth"),
+ "topological_ordering": event_dict.get("depth"),
+ "stream_ordering": event_dict.get("stream_ordering"),
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ # Insert the event edges
+ for event_id in our_server_events:
+ for prev_event_id in event_graph[event_id]:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_edges",
+ values={
+ "event_id": event_id,
+ "prev_event_id": prev_event_id,
+ "room_id": room_id,
+ },
+ )
+
+ # Insert the backward extremities
+ prev_events_of_our_events = {
+ prev_event_id
+ for our_server_event in our_server_events
+ for prev_event_id in complete_event_dict_map[our_server_event][
+ "prev_event_ids"
+ ]
+ }
+ backward_extremities = prev_events_of_our_events - our_server_events
+ for backward_extremity in backward_extremities:
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="event_backward_extremities",
+ values={
+ "event_id": backward_extremity,
+ "room_id": room_id,
+ },
+ )
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
+ def test_get_backfill_points_in_room(self):
+ """
+ Test to make sure we get some backfill points
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(
+ backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"]
+ )
+
+ def test_get_backfill_points_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that events we have attempted to backfill (and within
+ backoff timeout duration) do not show up as events to backfill again.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b5", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b4", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b2", "fake cause")
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertListEqual(backfill_event_ids, ["b6", "2", "b1"])
+
+ def test_get_backfill_points_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that, after we fake attempts to backfill event "b3",
+ we can retry and see "b3" again once the backoff timeout duration has
+ elapsed.
+ """
+ setup_info = self._setup_room_for_backfill_tests()
+ room_id = setup_info.room_id
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b3", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(room_id, "b1", "fake cause")
+ )
+
+ # Now advance time by 2 hours and we should only be able to see "b3"
+ # because we have waited long enough for the single attempt (2^1 hours)
+ # but we still shouldn't see "b1" because we haven't waited long enough
+ # for this many attempts. We didn't do anything to "b2" so it should be
+ # visible regardless.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Make sure that "b1" is not in the list because we've
+ # already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2"])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try again after we advanced enough time and we should see "b3" again
+ backfill_points = self.get_success(
+ self.store.get_backfill_points_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(
+ backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"]
+ )
+
+ def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
+ """
+ Sets up a room with various insertion event backward extremities to test
+ backfill functions against.
+
+ Returns:
+ _BackfillSetupInfo including the `room_id` to test against and
+ `depth_map` of events in the room
+ """
+ room_id = "!backfill-room-test:some-host"
+
+ depth_map: Dict[str, int] = {
+ "1": 1,
+ "2": 2,
+ "insertion_eventA": 3,
+ "3": 4,
+ "insertion_eventB": 5,
+ "4": 6,
+ "5": 7,
+ }
+
+ def populate_db(txn: LoggingTransaction):
+ # Insert the room to satisfy the foreign key constraint of
+ # `event_failed_pull_attempts`
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ "rooms",
+ {
+ "room_id": room_id,
+ "creator": "room_creator_user_id",
+ "is_public": True,
+ "room_version": "6",
+ },
+ )
+
+ # Insert our server events
+ stream_ordering = 0
+ for event_id, depth in depth_map.items():
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "event_id": event_id,
+ "type": EventTypes.MSC2716_INSERTION
+ if event_id.startswith("insertion_event")
+ else "test_regular_type",
+ "room_id": room_id,
+ "depth": depth,
+ "topological_ordering": depth,
+ "stream_ordering": stream_ordering,
+ "processed": True,
+ "outlier": False,
+ },
+ )
+
+ if event_id.startswith("insertion_event"):
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="insertion_event_extremities",
+ values={
+ "event_id": event_id,
+ "room_id": room_id,
+ },
+ )
+
+ stream_ordering += 1
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "_setup_room_for_insertion_backfill_tests_populate_db",
+ populate_db,
+ )
+ )
+
+ return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
+
+ def test_get_insertion_event_backward_extremities_in_room(self):
+ """
+ Test to make sure insertion event backward extremities are returned.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(
+ backfill_event_ids, ["insertion_eventB", "insertion_eventA"]
+ )
+
+ def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
+ self,
+ ):
+ """
+ Test to make sure that insertion events we have attempted to backfill
+ (and within backoff timeout duration) do not show up as events to
+ backfill again.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+
+ # Record some attempts to backfill these events which will make
+ # `get_insertion_event_backward_extremities_in_room` exclude them
+ # because we haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # No time has passed since we attempted to backfill ^
+
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ # Only the backfill points that we didn't record earlier exist here.
+ self.assertListEqual(backfill_event_ids, ["insertion_eventB"])
+
+ def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
+ self,
+ ):
+ """
+ Test to make sure that, after we fake several failed attempts to backfill
+ event "insertion_eventA", we can retry and see "insertion_eventA" again
+ once the backoff timeout duration has elapsed.
+ """
+ setup_info = self._setup_room_for_insertion_backfill_tests()
+ room_id = setup_info.room_id
+
+ # Record some attempts to backfill these events which will make
+ # `get_backfill_points_in_room` exclude them because we
+ # haven't passed the backoff interval.
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventB", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+ self.get_success(
+ self.store.record_event_failed_pull_attempt(
+ room_id, "insertion_eventA", "fake cause"
+ )
+ )
+
+ # Now advance time by 2 hours and we should only be able to see
+ # "insertion_eventB" because we have waited long enough for the single
+ # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
+ # because we haven't waited long enough for this many attempts.
+ self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
+
+ # Make sure that "insertion_eventA" is not in the list because we've
+ # already attempted many times
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(backfill_event_ids, ["insertion_eventB"])
+
+ # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
+ # see if we can now backfill it
+ self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
+
+ # Try at "insertion_eventA" again after we advanced enough time and we
+ # should see "insertion_eventA" again
+ backfill_points = self.get_success(
+ self.store.get_insertion_event_backward_extremities_in_room(room_id)
+ )
+ backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
+ self.assertListEqual(
+ backfill_event_ids, ["insertion_eventB", "insertion_eventA"]
+ )
+
@attr.s
class FakeEvent:
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index fc43d7edd1..473c965e19 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Tuple
+
from twisted.test.proto_helpers import MemoryReactor
from synapse.rest import admin
@@ -22,8 +24,6 @@ from synapse.util import Clock
from tests.unittest import HomeserverTestCase
-USER_ID = "@user:example.com"
-
class EventPushActionsStoreTestCase(HomeserverTestCase):
servlets = [
@@ -38,21 +38,13 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
assert persist_events_store is not None
self.persist_events_store = persist_events_store
- def test_get_unread_push_actions_for_user_in_range_for_http(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_http(
- USER_ID, 0, 1000, 20
- )
- )
+ def _create_users_and_room(self) -> Tuple[str, str, str, str, str]:
+ """
+ Creates two users and a shared room.
- def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None:
- self.get_success(
- self.store.get_unread_push_actions_for_user_in_range_for_email(
- USER_ID, 0, 1000, 20
- )
- )
-
- def test_count_aggregation(self) -> None:
+ Returns:
+ Tuple of (user 1 ID, user 1 token, user 2 ID, user 2 token, room ID).
+ """
# Create a user to receive notifications and send receipts.
user_id = self.register_user("user1235", "pass")
token = self.login("user1235", "pass")
@@ -65,6 +57,70 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
room_id = self.helper.create_room_as(user_id, tok=token)
self.helper.join(room_id, other_id, tok=other_token)
+ return user_id, token, other_id, other_token, room_id
+
+ def test_get_unread_push_actions_for_user_in_range(self) -> None:
+ """Test getting unread push actions for HTTP and email pushers."""
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
+ # Create two events, one of which is a highlight.
+ self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={"msgtype": "m.text", "body": "msg"},
+ tok=other_token,
+ )
+ event_id = self.helper.send_event(
+ room_id,
+ type="m.room.message",
+ content={"msgtype": "m.text", "body": user_id},
+ tok=other_token,
+ )["event_id"]
+
+ # Fetch unread actions for HTTP pushers.
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(http_actions))
+
+ # Fetch unread actions for email pushers.
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual(2, len(email_actions))
+
+ # Send a receipt, which should clear any actions.
+ self.get_success(
+ self.store.insert_receipt(
+ room_id,
+ "m.read",
+ user_id=user_id,
+ event_ids=[event_id],
+ thread_id=None,
+ data={},
+ )
+ )
+ http_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_http(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], http_actions)
+ email_actions = self.get_success(
+ self.store.get_unread_push_actions_for_user_in_range_for_email(
+ user_id, 0, 1000, 20
+ )
+ )
+ self.assertEqual([], email_actions)
+
+ def test_count_aggregation(self) -> None:
+ # Create a user to receive notifications and send receipts.
+ user_id, token, _, other_token, room_id = self._create_users_and_room()
+
last_event_id: str
def _assert_counts(noitf_count: int, highlight_count: int) -> None:
@@ -106,6 +162,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
"m.read",
user_id=user_id,
event_ids=[event_id],
+ thread_id=None,
data={},
)
)
diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py
index c89bfff241..9459ee1705 100644
--- a/tests/storage/test_receipts.py
+++ b/tests/storage/test_receipts.py
@@ -131,13 +131,18 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
@@ -164,7 +169,7 @@ class ReceiptTestCase(HomeserverTestCase):
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
res = self.get_success(
@@ -180,7 +185,12 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
+ self.room_id2,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
res = self.get_success(
@@ -202,13 +212,18 @@ class ReceiptTestCase(HomeserverTestCase):
# Send public read receipt for the first event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], None, {}
)
)
# Send private read receipt for the second event
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event1_2_id],
+ None,
+ {},
)
)
@@ -241,7 +256,7 @@ class ReceiptTestCase(HomeserverTestCase):
# Test receipt updating
self.get_success(
self.store.insert_receipt(
- self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {}
+ self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], None, {}
)
)
res = self.get_success(
@@ -259,7 +274,12 @@ class ReceiptTestCase(HomeserverTestCase):
# Test new room is reflected in what the method returns
self.get_success(
self.store.insert_receipt(
- self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {}
+ self.room_id2,
+ ReceiptTypes.READ_PRIVATE,
+ OUR_USER_ID,
+ [event2_1_id],
+ None,
+ {},
)
)
res = self.get_success(
diff --git a/tests/unittest.py b/tests/unittest.py
index 975b0a23a7..00cb023198 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -300,47 +300,31 @@ class HomeserverTestCase(TestCase):
if hasattr(self, "user_id"):
if self.hijack_auth:
assert self.helper.auth_user_id is not None
+ token = "some_fake_token"
# We need a valid token ID to satisfy foreign key constraints.
token_id = self.get_success(
self.hs.get_datastores().main.add_access_token_to_user(
self.helper.auth_user_id,
- "some_fake_token",
+ token,
None,
None,
)
)
- async def get_user_by_access_token(
- token: Optional[str] = None, allow_guest: bool = False
- ) -> JsonDict:
- assert self.helper.auth_user_id is not None
- return {
- "user": UserID.from_string(self.helper.auth_user_id),
- "token_id": token_id,
- "is_guest": False,
- }
-
- async def get_user_by_req(
- request: SynapseRequest,
- allow_guest: bool = False,
- allow_expired: bool = False,
- ) -> Requester:
+ # This has to be a function and not just a Mock, because
+ # `self.helper.auth_user_id` is temporarily reassigned in some tests
+ async def get_requester(*args, **kwargs) -> Requester:
assert self.helper.auth_user_id is not None
return create_requester(
- UserID.from_string(self.helper.auth_user_id),
- token_id,
- False,
- False,
- None,
+ user_id=UserID.from_string(self.helper.auth_user_id),
+ access_token_id=token_id,
)
# Type ignore: mypy doesn't like us assigning to methods.
- self.hs.get_auth().get_user_by_req = get_user_by_req # type: ignore[assignment]
- self.hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment]
- self.hs.get_auth().get_access_token_from_request = Mock( # type: ignore[assignment]
- return_value="1234"
- )
+ self.hs.get_auth().get_user_by_req = get_requester # type: ignore[assignment]
+ self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[assignment]
+ self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[assignment]
if self.needs_threadpool:
self.reactor.threadpool = ThreadPool() # type: ignore[assignment]
|