From bcd2495469726b1447db9c40d3f44856ac8643f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:42:51 +0200 Subject: Bump serde from 1.0.162 to 1.0.163 (#15589) --- Cargo.lock | 8 ++++---- changelog.d/15589.misc | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15589.misc diff --git a/Cargo.lock b/Cargo.lock index b03076d9fa..e169a665b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2f6e1ab5c2b98c05f0f35b236b22e8df7ead6ffbf51d7808da7f8817e7ab6" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.162" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0814352fd64b58489904a44ea8d90cb1a91dcb6b4f5ebabc32c8318e93cb6" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", diff --git a/changelog.d/15589.misc b/changelog.d/15589.misc new file mode 100644 index 0000000000..e055add929 --- /dev/null +++ b/changelog.d/15589.misc @@ -0,0 +1 @@ +Bump serde from 1.0.162 to 1.0.163. -- cgit 1.5.1 From 34ab8013793df7471352c4d2bb9d9dcd50de769a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:44:06 +0200 Subject: Bump pillow from 9.4.0 to 9.5.0 (#15593) --- changelog.d/15593.misc | 1 + poetry.lock | 153 +++++++++++++++++++++++-------------------------- 2 files changed, 72 insertions(+), 82 deletions(-) create mode 100644 changelog.d/15593.misc diff --git a/changelog.d/15593.misc b/changelog.d/15593.misc new file mode 100644 index 0000000000..32f1a0eba2 --- /dev/null +++ b/changelog.d/15593.misc @@ -0,0 +1 @@ +Bump pillow from 9.4.0 to 9.5.0. 
diff --git a/poetry.lock b/poetry.lock index 09d486ba51..1ef3c76b09 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1644,93 +1644,82 @@ files = [ [[package]] name = "pillow" -version = "9.4.0" +version = "9.5.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, - {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, - {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, - {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, - {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, - {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, - {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = 
"Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] + {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, + {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, + {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, + {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, + {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, + {file = "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, + {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, + {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, + {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, + {file = "Pillow-9.5.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47"}, + {file = "Pillow-9.5.0-cp37-cp37m-win32.whl", hash = "sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7"}, + {file = "Pillow-9.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = 
"sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f"}, + {file = "Pillow-9.5.0-cp38-cp38-win32.whl", hash = "sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc"}, + {file = "Pillow-9.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865"}, + {file = "Pillow-9.5.0-cp39-cp39-win32.whl", hash = "sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964"}, + {file = "Pillow-9.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, + {file = 
"Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, + {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] -- cgit 1.5.1 From 1b4782a37d77029caf5cb29fb737244fcf51a710 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:44:31 +0200 Subject: Bump types-setuptools from 67.7.0.1 to 67.7.0.2 (#15594) --- changelog.d/15594.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15594.misc diff --git a/changelog.d/15594.misc b/changelog.d/15594.misc new file mode 100644 index 0000000000..bf6810b4e4 --- /dev/null +++ b/changelog.d/15594.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.7.0.1 to 67.7.0.2. diff --git a/poetry.lock b/poetry.lock index 1ef3c76b09..ebc08922a6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3113,14 +3113,14 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.7.0.1" +version = "67.7.0.2" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.7.0.1.tar.gz", hash = "sha256:980a2651b2b019809817e1585071596b87fbafcb54433ff3b12445461db23790"}, - {file = "types_setuptools-67.7.0.1-py3-none-any.whl", hash = "sha256:471a4ecf6984ffada63ffcfa884bfcb62718bd2d1a1acf8ee5513ec99789ed5e"}, + {file = "types-setuptools-67.7.0.2.tar.gz", hash = "sha256:155789e85e79d5682b0d341919d4beb6140408ae52bac922af25b54e36ab25c0"}, + {file = "types_setuptools-67.7.0.2-py3-none-any.whl", hash = "sha256:bd30f6dbe9b83f0a7e6e3eab6d2df748aa4f55700d54e9f077d3aa30cc019445"}, ] [[package]] -- cgit 1.5.1 From 3fd8eb81dee9f5f651e5f17d92ddca8ad767385a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:44:47 +0200 Subject: Bump types-commonmark from 0.9.2.2 to 0.9.2.3 (#15592) --- changelog.d/15592.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15592.misc diff --git a/changelog.d/15592.misc b/changelog.d/15592.misc new file mode 100644 index 0000000000..7f9160607a --- /dev/null +++ b/changelog.d/15592.misc @@ -0,0 +1 @@ +Bump types-commonmark from 0.9.2.2 to 0.9.2.3. 
diff --git a/poetry.lock b/poetry.lock index ebc08922a6..33e236d2eb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2999,14 +2999,14 @@ files = [ [[package]] name = "types-commonmark" -version = "0.9.2.2" +version = "0.9.2.3" description = "Typing stubs for commonmark" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.2.tar.gz", hash = "sha256:f3259350634c2ce68ae503398430482f7cf44e5cae3d344995e916fbf453b4be"}, - {file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"}, + {file = "types-commonmark-0.9.2.3.tar.gz", hash = "sha256:42769a2c194fd5b49fd9eedfd4a83cd1d2514c6d0a36f00f5c5ffe0b6a2d2fcf"}, + {file = "types_commonmark-0.9.2.3-py3-none-any.whl", hash = "sha256:b575156e1b8a292d43acb36f861110b85c4bc7aa53bbfb5ac64addec15d18cfa"}, ] [[package]] -- cgit 1.5.1 From 2e8a2bda525042c0427404928a905056a86c6d3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:45:15 +0200 Subject: Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10 (#15591) --- changelog.d/15591.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15591.misc diff --git a/changelog.d/15591.misc b/changelog.d/15591.misc new file mode 100644 index 0000000000..82584b1f7c --- /dev/null +++ b/changelog.d/15591.misc @@ -0,0 +1 @@ +Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. diff --git a/poetry.lock b/poetry.lock index 33e236d2eb..b0b457832d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3059,14 +3059,14 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.9" +version = "2.9.21.10" description = "Typing stubs for psycopg2" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.9.tar.gz", hash = "sha256:388dc36a04551632289c4aaf1fc5b91e147654b165db896d094844e216f22bf5"}, - {file = "types_psycopg2-2.9.21.9-py3-none-any.whl", hash = "sha256:0332525fb9d3031d3da46f091e7d40b2c4d4958e9c00d2b4c1eaaa9f8ef9de4e"}, + {file = "types-psycopg2-2.9.21.10.tar.gz", hash = "sha256:c2600892312ae1c34e12f145749795d93dc4eac3ef7dbf8a9c1bfd45385e80d7"}, + {file = "types_psycopg2-2.9.21.10-py3-none-any.whl", hash = "sha256:918224a0731a3650832e46633e720703b5beef7693a064e777d9748654fcf5e5"}, ] [[package]] -- cgit 1.5.1 From 7b6c9f4c04b8581ca518c6bfbd902541fb2eca9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 10:45:34 +0200 Subject: Bump phonenumbers from 8.13.7 to 8.13.11 (#15590) --- changelog.d/15590.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15590.misc diff --git a/changelog.d/15590.misc b/changelog.d/15590.misc new file mode 100644 index 0000000000..a3ed116c45 --- /dev/null +++ b/changelog.d/15590.misc @@ -0,0 +1 @@ +Bump phonenumbers from 8.13.7 to 8.13.11. diff --git a/poetry.lock b/poetry.lock index b0b457832d..0bbaf4b012 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1632,14 +1632,14 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.7" +version = "8.13.11" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
category = "main" optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.7-py2.py3-none-any.whl", hash = "sha256:d3e3555b38c89b121f5b2e917847003bdd07027569d758d5f40156c01aeac089"}, - {file = "phonenumbers-8.13.7.tar.gz", hash = "sha256:253bb0e01250d21a11f2b42b3e6e161b7f6cb2ac440e2e2a95c1da71d221ee1a"}, + {file = "phonenumbers-8.13.11-py2.py3-none-any.whl", hash = "sha256:107469114fd297258a485bdf8238d0522cb392db1257faf2bf23384ecbdb0e8a"}, + {file = "phonenumbers-8.13.11.tar.gz", hash = "sha256:3e3274d88cab3609b55ff5b93417075dbca2d13064f103fbf562e0ea1dda0f9a"}, ] [[package]] -- cgit 1.5.1 From 3690d5bd89e696264ed2d56759c216f47bf23fca Mon Sep 17 00:00:00 2001 From: Michael Weimann Date: Mon, 15 May 2023 10:54:49 +0200 Subject: Add an unstable feature flag for MSC3981 to the /versions endpoint (#15558) Signed-off-by: Michael Weimann Co-authored-by: Patrick Cloke --- changelog.d/15558.misc | 1 + synapse/rest/client/versions.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15558.misc diff --git a/changelog.d/15558.misc b/changelog.d/15558.misc new file mode 100644 index 0000000000..a7cfee2513 --- /dev/null +++ b/changelog.d/15558.misc @@ -0,0 +1 @@ +Add `org.matrix.msc3981` info to `client/versions`. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index e9b56fc3f8..58c5b07390 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -126,6 +126,8 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3912": self.config.experimental.msc3912_enabled, # Adds support for unstable "intentional mentions" behaviour. "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, + # Whether recursively provide relations is supported. + "org.matrix.msc3981": self.config.experimental.msc3981_recurse_relations, # Adds support for deleting account data. "org.matrix.msc3391": self.config.experimental.msc3391_enabled, }, -- cgit 1.5.1 From aa5c0592e72e7ef8c015208e9947b385e6a87e59 Mon Sep 17 00:00:00 2001 From: villepeh <100730729+villepeh@users.noreply.github.com> Date: Mon, 15 May 2023 12:17:24 +0300 Subject: Update Mastodon SSO instructions (#15587) --- changelog.d/15587.doc | 1 + docs/openid.md | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15587.doc diff --git a/changelog.d/15587.doc b/changelog.d/15587.doc new file mode 100644 index 0000000000..b0d768b460 --- /dev/null +++ b/changelog.d/15587.doc @@ -0,0 +1 @@ +Update and improve Mastodon Single Sign-On documentation. diff --git a/docs/openid.md b/docs/openid.md index 73f1e06121..9773a7de52 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it. {"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"} ``` -As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following: +As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. 
Your Synapse configuration should include the following: ```yaml oidc_providers: @@ -585,7 +585,9 @@ oidc_providers: scopes: ["read"] user_mapping_provider: config: - subject_claim: "id" + subject_template: "{{ user.id }}" + localpart_template: "{{ user.username }}" + display_name_template: "{{ user.display_name }}" ``` Note that the fields `client_id` and `client_secret` are taken from the CURL response above. -- cgit 1.5.1 From b3ada9bfb4af574f4a913f225b50e9b7f9e0879f Mon Sep 17 00:00:00 2001 From: icp Date: Mon, 15 May 2023 14:49:11 +0530 Subject: Allow poetry-core 1.6.0 (#15588) --- changelog.d/15588.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15588.misc diff --git a/changelog.d/15588.misc b/changelog.d/15588.misc new file mode 100644 index 0000000000..8574359843 --- /dev/null +++ b/changelog.d/15588.misc @@ -0,0 +1 @@ +Update build system requirements to allow building with poetry-core==1.6.0. diff --git a/pyproject.toml b/pyproject.toml index 16b5d9bc3b..6471c1a40b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -368,7 +368,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.5.0", "setuptools_rust>=1.3,<=1.6.0"] +requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"] build-backend = "poetry.core.masonry.api" -- cgit 1.5.1 From 8583346335565e31169761793b3d77201c61bd67 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 15 May 2023 14:22:07 +0200 Subject: Revert "Bump pillow from 9.4.0 to 9.5.0 (#15593)" This reverts commit 34ab8013793df7471352c4d2bb9d9dcd50de769a. --- changelog.d/15593.misc | 1 - poetry.lock | 153 ++++++++++++++++++++++++++----------------------- 2 files changed, 82 insertions(+), 72 deletions(-) delete mode 100644 changelog.d/15593.misc diff --git a/changelog.d/15593.misc b/changelog.d/15593.misc deleted file mode 100644 index 32f1a0eba2..0000000000 --- a/changelog.d/15593.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pillow from 9.4.0 to 9.5.0. 
diff --git a/poetry.lock b/poetry.lock index 0bbaf4b012..48a752986d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1644,82 +1644,93 @@ files = [ [[package]] name = "pillow" -version = "9.5.0" +version = "9.4.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, - {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, - {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, - {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, - {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, - {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, - {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, - {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, - {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, - {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, - {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, - {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, - {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, - {file = "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, - {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, - {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, - {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, - {file = "Pillow-9.5.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906"}, - {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf"}, - {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78"}, - {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270"}, - {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392"}, - {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47"}, - {file = "Pillow-9.5.0-cp37-cp37m-win32.whl", hash = "sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7"}, - {file = "Pillow-9.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6"}, - {file = "Pillow-9.5.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597"}, - {file = "Pillow-9.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c"}, - {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a"}, - {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082"}, - {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf"}, - {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf"}, - {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51"}, - {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96"}, - {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f"}, - {file = "Pillow-9.5.0-cp38-cp38-win32.whl", hash = "sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc"}, - {file = "Pillow-9.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569"}, - {file = "Pillow-9.5.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66"}, - {file = "Pillow-9.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e"}, - {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115"}, - {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3"}, - {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef"}, - {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705"}, - {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1"}, - {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a"}, - {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865"}, - {file = "Pillow-9.5.0-cp39-cp39-win32.whl", hash = "sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964"}, - {file = "Pillow-9.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, - {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, - {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, - {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] + {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, + {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, + 
{file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, + {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, + {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, + {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, + {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, + {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, + {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, + {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, + {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, + {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, + {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, + {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, + {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, + {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, + {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, + {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, + {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, + {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, + {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = 
"sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, + {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, + {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, + {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, + {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, + {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, + {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, + {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, + {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, + {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, + {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, 
+ {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, + {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, + {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, + {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, + {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, + {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, + {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, + {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, + {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, + {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, + {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, + {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] -- cgit 1.5.1 From ba6b21c81e67583ac850eab5d96fe5666620d614 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 15 May 2023 08:58:09 -0400 Subject: Implement MSC3389 to protect relations from redaction. (#15565) MSC3389 proposes protecting the relation type & parent event ID from redaction. This keeps the relation information intact after redaction which helps with some UX flaws (e.g. deleting an event causes it to no longer be in a thread, which is confusing). --- changelog.d/15565.misc | 1 + synapse/api/room_versions.py | 17 +++++++++ synapse/events/utils.py | 12 ++++++ tests/events/test_utils.py | 90 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 120 insertions(+) create mode 100644 changelog.d/15565.misc diff --git a/changelog.d/15565.misc b/changelog.d/15565.misc new file mode 100644 index 0000000000..5adc1aab9d --- /dev/null +++ b/changelog.d/15565.misc @@ -0,0 +1 @@ +Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 5d9c13e3c3..e65b9a0287 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -96,6 +96,8 @@ class RoomVersion: msc2716_historical: bool # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events msc2716_redactions: bool + # MSC3389: Protect relation information from redaction. + msc3389_relation_redactions: bool # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of # knocks and restricted join rules into the same join condition. 
msc3787_knock_restricted_join_rule: bool @@ -128,6 +130,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -149,6 +152,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -170,6 +174,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -191,6 +196,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -212,6 +218,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -233,6 +240,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -254,6 +262,7 @@ class RoomVersions: msc2403_knocking=False, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -275,6 +284,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -296,6 +306,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -317,6 +328,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -338,6 +350,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -359,6 +372,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, msc3931_push_features=(), @@ -380,6 +394,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=True, msc2716_redactions=True, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), @@ -402,6 +417,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, 
msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), @@ -423,6 +439,7 @@ class RoomVersions: msc2403_knocking=True, msc2716_historical=False, msc2716_redactions=False, + msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, msc3931_push_features=(), diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 0802eb1963..e540f1582a 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -171,6 +171,18 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER: add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE) + # Protect the rel_type and event_id fields under the m.relates_to field. + if room_version.msc3389_relation_redactions: + relates_to = event_dict["content"].get("m.relates_to") + if isinstance(relates_to, collections.abc.Mapping): + new_relates_to = {} + for field in ("rel_type", "event_id"): + if field in relates_to: + new_relates_to[field] = relates_to[field] + # Only include a non-empty relates_to field. + if new_relates_to: + new_content["m.relates_to"] = new_relates_to + allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys} allowed_fields["content"] = new_content diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 1b179acb20..02f0800a31 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -15,6 +15,8 @@ import unittest as stdlib_unittest from typing import Any, List, Mapping, Optional +import attr + from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict @@ -435,6 +437,94 @@ class PruneEventTestCase(stdlib_unittest.TestCase): room_version=RoomVersions.V9, ) + def test_relations(self) -> None: + """Event relations get redacted until MSC3389.""" + # Normally the m._relates_to field is redacted. + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + "other": "stripped", + }, + }, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.V10, + ) + + # Create a new room version. + msc3389_room_ver = attr.evolve( + RoomVersions.V10, msc3389_relation_redactions=True + ) + + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + "other": "stripped", + }, + }, + }, + { + "type": "m.room.message", + "content": { + "m.relates_to": { + "rel_type": "rel_type", + "event_id": "$parent:domain", + }, + }, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + + # If the field is not an object, redact it. + self.run_test( + { + "type": "m.room.message", + "content": { + "body": "foo", + "m.relates_to": "stripped", + }, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + + # If the m.relates_to property would be empty, redact it. 
+ self.run_test( + { + "type": "m.room.message", + "content": {"body": "foo", "m.relates_to": {"foo": "stripped"}}, + }, + { + "type": "m.room.message", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=msc3389_room_ver, + ) + class SerializeEventTestCase(stdlib_unittest.TestCase): def serialize(self, ev: EventBase, fields: Optional[List[str]]) -> JsonDict: -- cgit 1.5.1 From eb3c1823d8b059073903354facfed81ed41efbce Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 15 May 2023 15:01:29 -0400 Subject: Reject instead of erroring on invalid membership events. (#15564) Instead of resulting in an internal server error for invalid events, return that the event is invalid. --- changelog.d/15564.bugfix | 1 + synapse/event_auth.py | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15564.bugfix diff --git a/changelog.d/15564.bugfix b/changelog.d/15564.bugfix new file mode 100644 index 0000000000..667114ba42 --- /dev/null +++ b/changelog.d/15564.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where an invalid membership event could cause an internal server error. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 25898b95a5..b4b43ec4d7 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -1054,10 +1054,15 @@ def _verify_third_party_invite( """ if "third_party_invite" not in event.content: return False - if "signed" not in event.content["third_party_invite"]: + third_party_invite = event.content["third_party_invite"] + if not isinstance(third_party_invite, collections.abc.Mapping): return False - signed = event.content["third_party_invite"]["signed"] - for key in {"mxid", "token"}: + if "signed" not in third_party_invite: + return False + signed = third_party_invite["signed"] + if not isinstance(signed, collections.abc.Mapping): + return False + for key in {"mxid", "token", "signatures"}: if key not in signed: return False @@ -1075,8 +1080,6 @@ def _verify_third_party_invite( if signed["mxid"] != event.state_key: return False - if signed["token"] != token: - return False for public_key_object in get_public_keys(invite_event): public_key = public_key_object["public_key"] @@ -1088,7 +1091,9 @@ def _verify_third_party_invite( verify_key = decode_verify_key_bytes( key_name, decode_base64(public_key) ) - verify_signed_json(signed, server, verify_key) + # verify_signed_json incorrectly states it wants a dict, it + # just needs a mapping. + verify_signed_json(signed, server, verify_key) # type: ignore[arg-type] # We got the public key from the invite, so we know that the # correct server signed the signed bundle. -- cgit 1.5.1 From f2905d827f8e5360907dadfd205da588f92aa286 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 15 May 2023 15:02:24 -0400 Subject: Implement MSC3821 to update redaction rules (`third_party_invite.signed`) (#15563) Updates the redaction rules to protect enough information that the event can still be properly verified. 
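Concretely, the MSC3821 rule keeps only the `signed` object under `third_party_invite` when an `m.room.member` event is redacted, since that is the part remote servers need in order to re-verify the invite. A minimal sketch of that pruning behaviour (the function and variable names here are illustrative, not Synapse's actual helpers):

```python
import collections.abc
from typing import Any, Dict


def prune_member_content(content: Dict[str, Any]) -> Dict[str, Any]:
    """Sketch of MSC3821 pruning for an m.room.member event's content."""
    new_content: Dict[str, Any] = {}
    if "membership" in content:
        new_content["membership"] = content["membership"]
    third_party_invite = content.get("third_party_invite")
    # Only a mapping can carry a `signed` block; any other type is dropped
    # entirely, matching the "invalid field" cases in the tests below.
    if isinstance(third_party_invite, collections.abc.Mapping):
        new_content["third_party_invite"] = {}
        if "signed" in third_party_invite:
            new_content["third_party_invite"]["signed"] = third_party_invite["signed"]
    return new_content
```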
--- changelog.d/15563.misc | 1 + synapse/api/room_versions.py | 40 +++++++++++++++++++++++ synapse/events/utils.py | 10 ++++++ tests/events/test_utils.py | 75 +++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15563.misc diff --git a/changelog.d/15563.misc b/changelog.d/15563.misc new file mode 100644 index 0000000000..8bfecf2b95 --- /dev/null +++ b/changelog.d/15563.misc @@ -0,0 +1 @@ +Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index e65b9a0287..7030b133d3 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -103,6 +103,8 @@ class RoomVersion: msc3787_knock_restricted_join_rule: bool # MSC3667: Enforce integer power levels msc3667_int_only_power_levels: bool + # MSC3821: Do not redact the third_party_invite content field for membership events. + msc3821_redaction_rules: bool # MSC3931: Adds a push rule condition for "room version feature flags", making # some push rules room version dependent. Note that adding a flag to this list # is not enough to mark it "supported": the push rule evaluator also needs to @@ -133,6 +135,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -155,6 +158,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -177,6 +181,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -199,6 +204,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -221,6 +227,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -243,6 +250,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -265,6 +273,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -287,6 +296,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -309,6 +319,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -331,6 +342,7 @@ class RoomVersions: 
msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -353,6 +365,30 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, + msc3931_push_features=(), + msc3989_redaction_rules=False, + ) + MSC3821 = RoomVersion( + "org.matrix.msc3821.opt1", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, + msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, + msc3821_redaction_rules=True, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -375,6 +411,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -397,6 +434,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=False, ) @@ -420,6 +458,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), msc3989_redaction_rules=False, ) @@ -442,6 +481,7 @@ class RoomVersions: msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, + msc3821_redaction_rules=False, msc3931_push_features=(), msc3989_redaction_rules=True, ) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index e540f1582a..e6d040176b 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -130,6 +130,16 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic add_fields("membership") if room_version.msc3375_redaction_rules: add_fields(EventContentFields.AUTHORISING_USER) + if room_version.msc3821_redaction_rules: + # Preserve the signed field under third_party_invite. + third_party_invite = event_dict["content"].get("third_party_invite") + if isinstance(third_party_invite, collections.abc.Mapping): + new_content["third_party_invite"] = {} + if "signed" in third_party_invite: + new_content["third_party_invite"]["signed"] = third_party_invite[ + "signed" + ] + elif event_type == EventTypes.Create: # MSC2176 rules state that create events cannot be redacted. 
if room_version.msc2176_redaction_rules: diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 02f0800a31..e40eac2eb0 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -394,7 +394,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): ) def test_member(self) -> None: - """Member events have changed behavior starting with MSC3375.""" + """Member events have changed behavior in MSC3375 and MSC3821.""" self.run_test( { "type": "m.room.member", @@ -437,6 +437,79 @@ class PruneEventTestCase(stdlib_unittest.TestCase): room_version=RoomVersions.V9, ) + # After MSC3821, the signed key under third_party_invite is protected + # from redaction. + THIRD_PARTY_INVITE = { + "display_name": "alice", + "signed": { + "mxid": "@alice:example.org", + "signatures": { + "magic.forest": { + "ed25519:3": "fQpGIW1Snz+pwLZu6sTy2aHy/DYWWTspTJRPyNp0PKkymfIsNffysMl6ObMMFdIJhk6g6pwlIqZ54rxo8SLmAg" + } + }, + "token": "abc123", + }, + } + + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": THIRD_PARTY_INVITE, + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": {"signed": THIRD_PARTY_INVITE["signed"]}, + }, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + + # Ensure this doesn't break if an invalid field is sent. + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": {}, + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": {"membership": "invite", "third_party_invite": {}}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + + self.run_test( + { + "type": "m.room.member", + "content": { + "membership": "invite", + "third_party_invite": "stripped", + "other_key": "stripped", + }, + }, + { + "type": "m.room.member", + "content": {"membership": "invite"}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC3821, + ) + def test_relations(self) -> None: """Event relations get redacted until MSC3389.""" # Normally the m._relates_to field is redacted. -- cgit 1.5.1 From ba572647b291e593e70a30e45c234c9766472ff3 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 15 May 2023 13:11:21 -0700 Subject: Export `run_as_background_process` from the module API (#15577) --- changelog.d/15577.misc | 1 + synapse/module_api/__init__.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15577.misc diff --git a/changelog.d/15577.misc b/changelog.d/15577.misc new file mode 100644 index 0000000000..74a7f495de --- /dev/null +++ b/changelog.d/15577.misc @@ -0,0 +1 @@ +Export `run_as_background_process` from the module API. 
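Assuming the export keeps the helper's existing signature from `synapse.metrics.background_process_metrics` (a description string followed by an async callable and its arguments), a module might use it roughly as follows; the module and function names are invented for illustration, and the import only works on Synapse versions that include this change:

```python
from synapse.module_api import run_as_background_process


async def _rotate_keys() -> None:
    ...  # long-running, module-specific work


# Runs the coroutine as a tracked background process: it gets its own
# logging context and is reported under this description in Synapse's
# metrics, rather than being tied to the current request's lifetime.
run_as_background_process("example_module_rotate_keys", _rotate_keys)
```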
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 4b59e6825b..2c9d181acf 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -156,6 +156,7 @@ __all__ = [ "parse_json_object_from_request", "respond_with_html", "run_in_background", + "run_as_background_process", "cached", "NOT_SPAM", "UserID", -- cgit 1.5.1 From 55b08534a412f462251753f67308405ca4d02ebe Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 May 2023 17:40:10 -0500 Subject: Fix subscriptable dict type Fix: ``` tests/test_state.py:267: error: "dict" is not subscriptable, use "typing.Dict" instead [misc] ``` Since Python 3.9, the `typing` aliases (such as `typing.Dict`) are deprecated and the built-in types are subscriptable (generics) by default, https://peps.python.org/pep-0585/#implementation --- tests/test_state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_state.py b/tests/test_state.py index 2029d3d60a..ddf59916b1 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -264,7 +264,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: dict[str, EventContext] = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( -- cgit 1.5.1 From c97198ee140fabf2c7444c9beea5baa87e98fd99 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 May 2023 17:44:26 -0500 Subject: Revert "Fix subscriptable dict type" This reverts commit 55b08534a412f462251753f67308405ca4d02ebe. --- tests/test_state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_state.py b/tests/test_state.py index ddf59916b1..2029d3d60a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -264,7 +264,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: Dict[str, EventContext] = {} + context_store: dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( -- cgit 1.5.1 From 3ec9f3b0cc2766d361b458420de5399cad87b17b Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 16 May 2023 11:23:05 +0100 Subject: 1.84.0rc1 --- CHANGES.md | 77 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/15025.misc | 1 - changelog.d/15197.feature | 1 - changelog.d/15224.feature | 1 - changelog.d/15312.feature | 1 - changelog.d/15470.misc | 1 - changelog.d/15491.misc | 1 - changelog.d/15509.misc | 1 - changelog.d/15516.feature | 1 - changelog.d/15522.misc | 1 - changelog.d/15523.bugfix | 1 - changelog.d/15527.misc | 1 - changelog.d/15528.feature | 1 - changelog.d/15529.misc | 1 - changelog.d/15531.misc | 1 - changelog.d/15532.misc | 1 - changelog.d/15533.misc | 1 - changelog.d/15534.misc | 1 - changelog.d/15535.misc | 1 - changelog.d/15536.feature | 1 - changelog.d/15539.misc | 1 - changelog.d/15542.misc | 1 - changelog.d/15543.misc | 1 - changelog.d/15544.doc | 1 - changelog.d/15545.misc | 1 - changelog.d/15548.misc | 1 - changelog.d/15549.misc | 1 - changelog.d/15550.misc | 1 - changelog.d/15551.misc | 1 - changelog.d/15552.misc | 1 - changelog.d/15553.misc | 1 - changelog.d/15554.bugfix | 1 - changelog.d/15555.bugfix | 1 - changelog.d/15558.misc | 1 - changelog.d/15559.feature | 1 - changelog.d/15560.doc | 1 - changelog.d/15562.misc | 1 - changelog.d/15563.misc | 1 - changelog.d/15564.bugfix | 1 - changelog.d/15565.misc | 1 - changelog.d/15566.bugfix | 1 - changelog.d/15567.docker | 1 - changelog.d/15569.feature | 1 - changelog.d/15570.misc | 1 -
changelog.d/15571.bugfix | 1 - changelog.d/15576.misc | 1 - changelog.d/15577.misc | 1 - changelog.d/15587.doc | 1 - changelog.d/15588.misc | 1 - changelog.d/15589.misc | 1 - changelog.d/15590.misc | 1 - changelog.d/15591.misc | 1 - changelog.d/15592.misc | 1 - changelog.d/15594.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 56 files changed, 84 insertions(+), 54 deletions(-) delete mode 100644 changelog.d/15025.misc delete mode 100644 changelog.d/15197.feature delete mode 100644 changelog.d/15224.feature delete mode 100644 changelog.d/15312.feature delete mode 100644 changelog.d/15470.misc delete mode 100644 changelog.d/15491.misc delete mode 100644 changelog.d/15509.misc delete mode 100644 changelog.d/15516.feature delete mode 100644 changelog.d/15522.misc delete mode 100644 changelog.d/15523.bugfix delete mode 100644 changelog.d/15527.misc delete mode 100644 changelog.d/15528.feature delete mode 100644 changelog.d/15529.misc delete mode 100644 changelog.d/15531.misc delete mode 100644 changelog.d/15532.misc delete mode 100644 changelog.d/15533.misc delete mode 100644 changelog.d/15534.misc delete mode 100644 changelog.d/15535.misc delete mode 100644 changelog.d/15536.feature delete mode 100644 changelog.d/15539.misc delete mode 100644 changelog.d/15542.misc delete mode 100644 changelog.d/15543.misc delete mode 100644 changelog.d/15544.doc delete mode 100644 changelog.d/15545.misc delete mode 100644 changelog.d/15548.misc delete mode 100644 changelog.d/15549.misc delete mode 100644 changelog.d/15550.misc delete mode 100644 changelog.d/15551.misc delete mode 100644 changelog.d/15552.misc delete mode 100644 changelog.d/15553.misc delete mode 100644 changelog.d/15554.bugfix delete mode 100644 changelog.d/15555.bugfix delete mode 100644 changelog.d/15558.misc delete mode 100644 changelog.d/15559.feature delete mode 100644 changelog.d/15560.doc delete mode 100644 changelog.d/15562.misc delete mode 100644 changelog.d/15563.misc delete mode 100644 changelog.d/15564.bugfix delete mode 100644 changelog.d/15565.misc delete mode 100644 changelog.d/15566.bugfix delete mode 100644 changelog.d/15567.docker delete mode 100644 changelog.d/15569.feature delete mode 100644 changelog.d/15570.misc delete mode 100644 changelog.d/15571.bugfix delete mode 100644 changelog.d/15576.misc delete mode 100644 changelog.d/15577.misc delete mode 100644 changelog.d/15587.doc delete mode 100644 changelog.d/15588.misc delete mode 100644 changelog.d/15589.misc delete mode 100644 changelog.d/15590.misc delete mode 100644 changelog.d/15591.misc delete mode 100644 changelog.d/15592.misc delete mode 100644 changelog.d/15594.misc diff --git a/CHANGES.md b/CHANGES.md index 9c200bfb7b..9e497b1b81 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,80 @@ +Synapse 1.84.0rc1 (2023-05-16) +============================== + +Features +-------- + +- Add an option to prevent media downloads from configured domains. ([\#15197](https://github.com/matrix-org/synapse/issues/15197)) +- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224)) +- Add redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312)) +- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. 
([\#15516](https://github.com/matrix-org/synapse/issues/15516)) +- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528)) +- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536)) +- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559)) +- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569)) + + +Bugfixes +-------- + +- Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523)) +- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555)) +- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564)) +- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571)) + + +Updates to the Docker image +--------------------------- + +- Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567)) + + +Improved Documentation +---------------------- + +- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544)) +- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560)) +- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587)) + + +Internal Changes +---------------- + +- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025)) +- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470)) +- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491)) +- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509)) +- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522)) +- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527)) +- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529)) +- Speed up deleting of old rows in `event_push_actions`. 
([\#15531](https://github.com/matrix-org/synapse/issues/15531)) +- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545)) +- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534)) +- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535)) +- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539)) +- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542)) +- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543)) +- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548)) +- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549)) +- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550)) +- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551)) +- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552)) +- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553)) +- Add `org.matrix.msc3981` info to `client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558)) +- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562)) +- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563)) +- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565)) +- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570)) +- Deal with upcoming Github Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576)) +- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577)) +- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588)) +- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589)) +- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590)) +- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. 
([\#15591](https://github.com/matrix-org/synapse/issues/15591)) +- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592)) +- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594)) + + Synapse 1.83.0 (2023-05-09) =========================== diff --git a/changelog.d/15025.misc b/changelog.d/15025.misc deleted file mode 100644 index 1f04d85729..0000000000 --- a/changelog.d/15025.misc +++ /dev/null @@ -1 +0,0 @@ -Use oEmbed to generate URL previews for YouTube Shorts. diff --git a/changelog.d/15197.feature b/changelog.d/15197.feature deleted file mode 100644 index c8a6f114e8..0000000000 --- a/changelog.d/15197.feature +++ /dev/null @@ -1 +0,0 @@ -Add an option to prevent media downloads from configured domains. \ No newline at end of file diff --git a/changelog.d/15224.feature b/changelog.d/15224.feature deleted file mode 100644 index 5d8413f8be..0000000000 --- a/changelog.d/15224.feature +++ /dev/null @@ -1 +0,0 @@ -Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. diff --git a/changelog.d/15312.feature b/changelog.d/15312.feature deleted file mode 100644 index e4e972cfeb..0000000000 --- a/changelog.d/15312.feature +++ /dev/null @@ -1 +0,0 @@ -Add redis TLS configuration options. \ No newline at end of file diff --git a/changelog.d/15470.misc b/changelog.d/15470.misc deleted file mode 100644 index 0af0b499c6..0000000000 --- a/changelog.d/15470.misc +++ /dev/null @@ -1 +0,0 @@ -Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. diff --git a/changelog.d/15491.misc b/changelog.d/15491.misc deleted file mode 100644 index 98f88dbf19..0000000000 --- a/changelog.d/15491.misc +++ /dev/null @@ -1 +0,0 @@ -Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. diff --git a/changelog.d/15509.misc b/changelog.d/15509.misc deleted file mode 100644 index 1eb26c83b7..0000000000 --- a/changelog.d/15509.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pyicu from 2.10.2 to 2.11. diff --git a/changelog.d/15516.feature b/changelog.d/15516.feature deleted file mode 100644 index 02a101bb88..0000000000 --- a/changelog.d/15516.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to delay push notifications by a random amount, to discourage time-based profiling. diff --git a/changelog.d/15522.misc b/changelog.d/15522.misc deleted file mode 100644 index a5a229e4a0..0000000000 --- a/changelog.d/15522.misc +++ /dev/null @@ -1 +0,0 @@ -Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) (#15522). diff --git a/changelog.d/15523.bugfix b/changelog.d/15523.bugfix deleted file mode 100644 index c00754019f..0000000000 --- a/changelog.d/15523.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. diff --git a/changelog.d/15527.misc b/changelog.d/15527.misc deleted file mode 100644 index 752a32adeb..0000000000 --- a/changelog.d/15527.misc +++ /dev/null @@ -1 +0,0 @@ -Don't use a trusted key server when running the demo scripts. 
\ No newline at end of file diff --git a/changelog.d/15528.feature b/changelog.d/15528.feature deleted file mode 100644 index aae9fa1ecf..0000000000 --- a/changelog.d/15528.feature +++ /dev/null @@ -1 +0,0 @@ -Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. diff --git a/changelog.d/15529.misc b/changelog.d/15529.misc deleted file mode 100644 index 7ad424d8df..0000000000 --- a/changelog.d/15529.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up rebuilding of the user directory for local users. diff --git a/changelog.d/15531.misc b/changelog.d/15531.misc deleted file mode 100644 index 6d4da961b5..0000000000 --- a/changelog.d/15531.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up deleting of old rows in `event_push_actions`. diff --git a/changelog.d/15532.misc b/changelog.d/15532.misc deleted file mode 100644 index 1ee700f829..0000000000 --- a/changelog.d/15532.misc +++ /dev/null @@ -1 +0,0 @@ -Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/changelog.d/15533.misc b/changelog.d/15533.misc deleted file mode 100644 index 1ee700f829..0000000000 --- a/changelog.d/15533.misc +++ /dev/null @@ -1 +0,0 @@ -Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/changelog.d/15534.misc b/changelog.d/15534.misc deleted file mode 100644 index fd9ba2a6e1..0000000000 --- a/changelog.d/15534.misc +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. diff --git a/changelog.d/15535.misc b/changelog.d/15535.misc deleted file mode 100644 index 9981606c32..0000000000 --- a/changelog.d/15535.misc +++ /dev/null @@ -1 +0,0 @@ -Move various module API callback registration methods to a dedicated class. \ No newline at end of file diff --git a/changelog.d/15536.feature b/changelog.d/15536.feature deleted file mode 100644 index 824c24575f..0000000000 --- a/changelog.d/15536.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. diff --git a/changelog.d/15539.misc b/changelog.d/15539.misc deleted file mode 100644 index e5af5dee5c..0000000000 --- a/changelog.d/15539.misc +++ /dev/null @@ -1 +0,0 @@ -Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). diff --git a/changelog.d/15542.misc b/changelog.d/15542.misc deleted file mode 100644 index 32e3d678a1..0000000000 --- a/changelog.d/15542.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out an `is_mine_server_name` method. diff --git a/changelog.d/15543.misc b/changelog.d/15543.misc deleted file mode 100644 index ba1dc7597e..0000000000 --- a/changelog.d/15543.misc +++ /dev/null @@ -1 +0,0 @@ -Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. \ No newline at end of file diff --git a/changelog.d/15544.doc b/changelog.d/15544.doc deleted file mode 100644 index a6d1e96900..0000000000 --- a/changelog.d/15544.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify documentation of the "Create or modify account" Admin API. 
\ No newline at end of file diff --git a/changelog.d/15545.misc b/changelog.d/15545.misc deleted file mode 100644 index c7c0741f96..0000000000 --- a/changelog.d/15545.misc +++ /dev/null @@ -1 +0,0 @@ - Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. \ No newline at end of file diff --git a/changelog.d/15548.misc b/changelog.d/15548.misc deleted file mode 100644 index e05ddde438..0000000000 --- a/changelog.d/15548.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde from 1.0.160 to 1.0.162. diff --git a/changelog.d/15549.misc b/changelog.d/15549.misc deleted file mode 100644 index 70573688d1..0000000000 --- a/changelog.d/15549.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.6.0.5 to 67.7.0.1. diff --git a/changelog.d/15550.misc b/changelog.d/15550.misc deleted file mode 100644 index 58d5594e7a..0000000000 --- a/changelog.d/15550.misc +++ /dev/null @@ -1 +0,0 @@ -Bump sentry-sdk from 1.19.1 to 1.22.1. diff --git a/changelog.d/15551.misc b/changelog.d/15551.misc deleted file mode 100644 index a8bedbe0e7..0000000000 --- a/changelog.d/15551.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ruff from 0.0.259 to 0.0.265. diff --git a/changelog.d/15552.misc b/changelog.d/15552.misc deleted file mode 100644 index 24972a2f8c..0000000000 --- a/changelog.d/15552.misc +++ /dev/null @@ -1 +0,0 @@ -Bump hiredis from 2.2.2 to 2.2.3. diff --git a/changelog.d/15553.misc b/changelog.d/15553.misc deleted file mode 100644 index ca9eafd6c1..0000000000 --- a/changelog.d/15553.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.29.0.0 to 2.30.0.0. diff --git a/changelog.d/15554.bugfix b/changelog.d/15554.bugfix deleted file mode 100644 index 0fd9de8c65..0000000000 --- a/changelog.d/15554.bugfix +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. diff --git a/changelog.d/15555.bugfix b/changelog.d/15555.bugfix deleted file mode 100644 index 0fd9de8c65..0000000000 --- a/changelog.d/15555.bugfix +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. diff --git a/changelog.d/15558.misc b/changelog.d/15558.misc deleted file mode 100644 index a7cfee2513..0000000000 --- a/changelog.d/15558.misc +++ /dev/null @@ -1 +0,0 @@ -Add `org.matrix.msc3981` info to `client/versions`. diff --git a/changelog.d/15559.feature b/changelog.d/15559.feature deleted file mode 100644 index 07f729e38c..0000000000 --- a/changelog.d/15559.feature +++ /dev/null @@ -1 +0,0 @@ -Advertise support for Matrix 1.6 on `/_matrix/client/versions`. diff --git a/changelog.d/15560.doc b/changelog.d/15560.doc deleted file mode 100644 index a552391886..0000000000 --- a/changelog.d/15560.doc +++ /dev/null @@ -1 +0,0 @@ -Fix path to the `statistics/database/rooms` admin API in documentation. diff --git a/changelog.d/15562.misc b/changelog.d/15562.misc deleted file mode 100644 index eeeb553d8f..0000000000 --- a/changelog.d/15562.misc +++ /dev/null @@ -1 +0,0 @@ -Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. 
\ No newline at end of file diff --git a/changelog.d/15563.misc b/changelog.d/15563.misc deleted file mode 100644 index 8bfecf2b95..0000000000 --- a/changelog.d/15563.misc +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. diff --git a/changelog.d/15564.bugfix b/changelog.d/15564.bugfix deleted file mode 100644 index 667114ba42..0000000000 --- a/changelog.d/15564.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where an invalid membership event could cause an internal server error. diff --git a/changelog.d/15565.misc b/changelog.d/15565.misc deleted file mode 100644 index 5adc1aab9d..0000000000 --- a/changelog.d/15565.misc +++ /dev/null @@ -1 +0,0 @@ -Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). diff --git a/changelog.d/15566.bugfix b/changelog.d/15566.bugfix deleted file mode 100644 index 130342590c..0000000000 --- a/changelog.d/15566.bugfix +++ /dev/null @@ -1 +0,0 @@ -Require at least poetry-core v1.1.0. diff --git a/changelog.d/15567.docker b/changelog.d/15567.docker deleted file mode 100644 index 8995bc1bd7..0000000000 --- a/changelog.d/15567.docker +++ /dev/null @@ -1 +0,0 @@ -Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. diff --git a/changelog.d/15569.feature b/changelog.d/15569.feature deleted file mode 100644 index b58af8ad55..0000000000 --- a/changelog.d/15569.feature +++ /dev/null @@ -1 +0,0 @@ -Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/changelog.d/15570.misc b/changelog.d/15570.misc deleted file mode 100644 index ee04509981..0000000000 --- a/changelog.d/15570.misc +++ /dev/null @@ -1 +0,0 @@ -Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. diff --git a/changelog.d/15571.bugfix b/changelog.d/15571.bugfix deleted file mode 100644 index 130342590c..0000000000 --- a/changelog.d/15571.bugfix +++ /dev/null @@ -1 +0,0 @@ -Require at least poetry-core v1.1.0. diff --git a/changelog.d/15576.misc b/changelog.d/15576.misc deleted file mode 100644 index 9df71999d9..0000000000 --- a/changelog.d/15576.misc +++ /dev/null @@ -1 +0,0 @@ -Deal with upcoming Github Actions deprecations. diff --git a/changelog.d/15577.misc b/changelog.d/15577.misc deleted file mode 100644 index 74a7f495de..0000000000 --- a/changelog.d/15577.misc +++ /dev/null @@ -1 +0,0 @@ -Export `run_as_background_process` from the module API. diff --git a/changelog.d/15587.doc b/changelog.d/15587.doc deleted file mode 100644 index b0d768b460..0000000000 --- a/changelog.d/15587.doc +++ /dev/null @@ -1 +0,0 @@ -Update and improve Mastodon Single Sign-On documentation. diff --git a/changelog.d/15588.misc b/changelog.d/15588.misc deleted file mode 100644 index 8574359843..0000000000 --- a/changelog.d/15588.misc +++ /dev/null @@ -1 +0,0 @@ -Update build system requirements to allow building with poetry-core==1.6.0. diff --git a/changelog.d/15589.misc b/changelog.d/15589.misc deleted file mode 100644 index e055add929..0000000000 --- a/changelog.d/15589.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde from 1.0.162 to 1.0.163. diff --git a/changelog.d/15590.misc b/changelog.d/15590.misc deleted file mode 100644 index a3ed116c45..0000000000 --- a/changelog.d/15590.misc +++ /dev/null @@ -1 +0,0 @@ -Bump phonenumbers from 8.13.7 to 8.13.11. 
diff --git a/changelog.d/15591.misc b/changelog.d/15591.misc deleted file mode 100644 index 82584b1f7c..0000000000 --- a/changelog.d/15591.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. diff --git a/changelog.d/15592.misc b/changelog.d/15592.misc deleted file mode 100644 index 7f9160607a..0000000000 --- a/changelog.d/15592.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-commonmark from 0.9.2.2 to 0.9.2.3. diff --git a/changelog.d/15594.misc b/changelog.d/15594.misc deleted file mode 100644 index bf6810b4e4..0000000000 --- a/changelog.d/15594.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.7.0.1 to 67.7.0.2. diff --git a/debian/changelog b/debian/changelog index 15ff7e82c3..ad163add2b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium + + * New Synapse release 1.84.0rc1. + + -- Synapse Packaging team Tue, 16 May 2023 11:12:02 +0100 + matrix-synapse-py3 (1.83.0) stable; urgency=medium * New Synapse release 1.83.0. diff --git a/pyproject.toml b/pyproject.toml index 6471c1a40b..86e1537a6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.83.0" +version = "1.84.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From 0ccfb9318c525d8f8f073c2d8fe6e763fb7b9c46 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 16 May 2023 11:57:29 +0100 Subject: Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9e497b1b81..4877ba9d44 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -60,7 +60,7 @@ Internal Changes - Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551)) - Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552)) - Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553)) -- Add `org.matrix.msc3981` info to `client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558)) +- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558)) - Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562)) - Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563)) - Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565)) -- cgit 1.5.1 From b6a7d49b6f1f7c494372fd1b9aab3982c9a299c7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 May 2023 08:56:42 -0500 Subject: `traceback.format_exception(...)` usage that is compatible with Python 3.7 and 3.11 (#15599) * Usage that is compatible with Python 3.7 and 3.11 > Since Python 3.10, instead of passing value and tb, an exception object can be passed as the first argument. If value and tb are provided, the first argument is ignored in order to provide backwards compatibility.
> > -- https://docs.python.org/3/library/traceback.html * Add changelog --- changelog.d/15599.bugfix | 1 + synapse/app/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15599.bugfix diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix new file mode 100644 index 0000000000..b58af8ad55 --- /dev/null +++ b/changelog.d/15599.bugfix @@ -0,0 +1 @@ +Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 4dfcf484fa..936b1b0430 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -214,7 +214,7 @@ def handle_startup_exception(e: Exception) -> NoReturn: # the reactor are written to the logs, followed by a summary to stderr. logger.exception("Exception during startup") - error_string = "".join(traceback.format_exception(e)) + error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__)) indented_error_string = indent(error_string, " ") quit_with_error( -- cgit 1.5.1 From c51d2e6199a901113f2dabeb64fc64b015751988 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 May 2023 12:19:46 -0500 Subject: Fix subscriptable type usage in Python <3.9 (#15604) Fix the following `mypy` errors when running `mypy` with Python 3.7: ``` synapse/storage/controllers/stats.py:58: error: "Counter" is not subscriptable, use "typing.Counter" instead [misc] tests/test_state.py:267: error: "dict" is not subscriptable, use "typing.Dict" instead [misc] ``` Part of https://github.com/matrix-org/synapse/issues/15603 Since Python 3.9, the `typing` aliases (such as `typing.Dict`) are deprecated and the built-in types are subscriptable (generics) by default, https://peps.python.org/pep-0585/#implementation --- changelog.d/15604.misc | 1 + synapse/storage/controllers/stats.py | 3 +-- tests/test_state.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15604.misc diff --git a/changelog.d/15604.misc b/changelog.d/15604.misc new file mode 100644 index 0000000000..92d1d600bc --- /dev/null +++ b/changelog.d/15604.misc @@ -0,0 +1 @@ +Fix subscriptable type usage in Python <3.9. diff --git a/synapse/storage/controllers/stats.py b/synapse/storage/controllers/stats.py index 988e44c6af..2a03528fee 100644 --- a/synapse/storage/controllers/stats.py +++ b/synapse/storage/controllers/stats.py @@ -13,8 +13,7 @@ # limitations under the License. import logging -from collections import Counter -from typing import TYPE_CHECKING, Collection, List, Tuple +from typing import TYPE_CHECKING, Collection, Counter, List, Tuple from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction diff --git a/tests/test_state.py b/tests/test_state.py index 2029d3d60a..ddf59916b1 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -264,7 +264,7 @@ class StateTestCase(unittest.TestCase): self.dummy_store.register_events(graph.walk()) - context_store: dict[str, EventContext] = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( -- cgit 1.5.1 From 77cda342be3c81fa8557d208e67dc1662ddb462a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 May 2023 08:56:42 -0500 Subject: `traceback.format_exception(...)` usage that is compatible with Python 3.7 and 3.11 (#15599) * Usage that is compatible with Python 3.7 and 3.11 > Since Python 3.10, instead of passing value and tb, an exception object can be passed as the first argument.
If value and tb are provided, the first argument is ignored in order to provide backwards compatibility. > > -- https://docs.python.org/3/library/traceback.html * Add changelog --- changelog.d/15599.bugfix | 1 + synapse/app/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15599.bugfix diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix new file mode 100644 index 0000000000..b58af8ad55 --- /dev/null +++ b/changelog.d/15599.bugfix @@ -0,0 +1 @@ +Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 4dfcf484fa..936b1b0430 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -214,7 +214,7 @@ def handle_startup_exception(e: Exception) -> NoReturn: # the reactor are written to the logs, followed by a summary to stderr. logger.exception("Exception during startup") - error_string = "".join(traceback.format_exception(e)) + error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__)) indented_error_string = indent(error_string, " ") quit_with_error( -- cgit 1.5.1 From 9f6ff6a0eb94a9f81b9948bc3b651a1eb78de460 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 16 May 2023 10:57:39 -0700 Subject: Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters` (#15537) --- changelog.d/15537.misc | 1 + synapse/storage/databases/main/filtering.py | 95 +++++++++++++++++++ synapse/storage/databases/main/profile.py | 102 ++++++++++++++++++++- synapse/storage/schema/__init__.py | 10 +- .../01_add_profiles_not_valid_check.sql.postgres | 16 ++++ ...2_add_user_filters_not_valid_check.sql.postgres | 16 ++++ .../77/03bg_populate_full_user_id_profiles.sql | 16 ++++ .../77/04bg_populate_full_user_id_user_filters.sql | 16 ++++ tests/storage/test_profile.py | 63 +++++++++++++ tests/storage/test_user_filters.py | 94 +++++++++++++++++++ 10 files changed, 425 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15537.misc create mode 100644 synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql create mode 100644 synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql create mode 100644 tests/storage/test_user_filters.py diff --git a/changelog.d/15537.misc b/changelog.d/15537.misc new file mode 100644 index 0000000000..979e0ba977 --- /dev/null +++ b/changelog.d/15537.misc @@ -0,0 +1 @@ +Add not null constraint to column full_user_id of tables profiles and user_filters. 
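Both background updates below follow the same keyset-pagination pattern: scan the table in `user_id` order, rewrite `full_user_id` for windows of 50 rows, and only after the final window validate the `NOT VALID` check constraint (on Postgres). One step of that pattern, condensed into a sketch that assumes a DB-API style cursor (`txn`) and an illustrative `server_name`:

```python
from typing import Optional


def backfill_step(txn, server_name: str, lower_bound: str) -> Optional[str]:
    """One backfill window; returns the next cursor, or None when finished."""
    # Peek 50 rows past the cursor to find this window's upper bound.
    txn.execute(
        "SELECT user_id FROM user_filters WHERE user_id > ?"
        " ORDER BY user_id LIMIT 1 OFFSET 50",
        (lower_bound,),
    )
    row = txn.fetchone()
    upper_bound = row[0] if row else None
    if upper_bound is not None:
        # Rewrite exactly this window; already-populated rows are skipped.
        txn.execute(
            "UPDATE user_filters SET full_user_id = '@' || user_id || ?"
            " WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL",
            (f":{server_name}", lower_bound, upper_bound),
        )
    else:
        # Final window: everything past the cursor; afterwards no NULLs
        # remain, so Postgres can VALIDATE the NOT VALID constraint.
        txn.execute(
            "UPDATE user_filters SET full_user_id = '@' || user_id || ?"
            " WHERE ? < user_id AND full_user_id IS NULL",
            (f":{server_name}", lower_bound),
        )
    return upper_bound
```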
diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 50516402f9..da31eb44dc 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -25,6 +25,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.engines import PostgresEngine from synapse.types import JsonDict, UserID from synapse.util.caches.descriptors import cached @@ -40,6 +41,8 @@ class FilteringWorkerStore(SQLBaseStore): hs: "HomeServer", ): super().__init__(database, db_conn, hs) + self.server_name: str = hs.hostname + self.database_engine = database.engine self.db_pool.updates.register_background_index_update( "full_users_filters_unique_idx", index_name="full_users_unique_idx", @@ -48,6 +51,98 @@ class FilteringWorkerStore(SQLBaseStore): unique=True, ) + self.db_pool.updates.register_background_update_handler( + "populate_full_user_id_user_filters", + self.populate_full_user_id_user_filters, + ) + + async def populate_full_user_id_user_filters( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the column `full_user_id` of the table + user_filters from entries in the column `user_local_part` of the same table + """ + + lower_bound_id = progress.get("lower_bound_id", "") + + def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + sql = """ + SELECT user_id FROM user_filters + WHERE user_id > ? + ORDER BY user_id + LIMIT 1 OFFSET 50 + """ + txn.execute(sql, (lower_bound_id,)) + res = txn.fetchone() + if res: + upper_bound_id = res[0] + return upper_bound_id + else: + return None + + def _process_batch( + txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str + ) -> None: + sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL + """ + txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id)) + + def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None: + sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE ? 
< user_id AND full_user_id IS NULL + """ + txn.execute( + sql, + ( + f":{self.server_name}", + lower_bound_id, + ), + ) + + if isinstance(self.database_engine, PostgresEngine): + sql = """ + ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null + """ + txn.execute(sql) + + upper_bound_id = await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", _get_last_id + ) + + if upper_bound_id is None: + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", _final_batch, lower_bound_id + ) + + await self.db_pool.updates._end_background_update( + "populate_full_user_id_user_filters" + ) + return 1 + + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", + _process_batch, + lower_bound_id, + upper_bound_id, + ) + + progress["lower_bound_id"] = upper_bound_id + + await self.db_pool.runInteraction( + "populate_full_user_id_user_filters", + self.db_pool.updates._background_update_progress_txn, + "populate_full_user_id_user_filters", + progress, + ) + + return 50 + @cached(num_args=2) async def get_user_filter( self, user_localpart: str, filter_id: Union[int, str] diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index c4022d2427..65c92bef51 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -15,9 +15,14 @@ from typing import TYPE_CHECKING, Optional from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.databases.main.roommember import ProfileInfo -from synapse.types import UserID +from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -31,6 +36,8 @@ class ProfileWorkerStore(SQLBaseStore): hs: "HomeServer", ): super().__init__(database, db_conn, hs) + self.server_name: str = hs.hostname + self.database_engine = database.engine self.db_pool.updates.register_background_index_update( "profiles_full_user_id_key_idx", index_name="profiles_full_user_id_key", @@ -39,6 +46,97 @@ class ProfileWorkerStore(SQLBaseStore): unique=True, ) + self.db_pool.updates.register_background_update_handler( + "populate_full_user_id_profiles", self.populate_full_user_id_profiles + ) + + async def populate_full_user_id_profiles( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the column `full_user_id` of the table + profiles from entries in the column `user_local_part` of the same table + """ + + lower_bound_id = progress.get("lower_bound_id", "") + + def _get_last_id(txn: LoggingTransaction) -> Optional[str]: + sql = """ + SELECT user_id FROM profiles + WHERE user_id > ? + ORDER BY user_id + LIMIT 1 OFFSET 50 + """ + txn.execute(sql, (lower_bound_id,)) + res = txn.fetchone() + if res: + upper_bound_id = res[0] + return upper_bound_id + else: + return None + + def _process_batch( + txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str + ) -> None: + sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND user_id <= ? 
AND full_user_id IS NULL + """ + txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id)) + + def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None: + sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE ? < user_id AND full_user_id IS NULL + """ + txn.execute( + sql, + ( + f":{self.server_name}", + lower_bound_id, + ), + ) + + if isinstance(self.database_engine, PostgresEngine): + sql = """ + ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null + """ + txn.execute(sql) + + upper_bound_id = await self.db_pool.runInteraction( + "populate_full_user_id_profiles", _get_last_id + ) + + if upper_bound_id is None: + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", _final_batch, lower_bound_id + ) + + await self.db_pool.updates._end_background_update( + "populate_full_user_id_profiles" + ) + return 1 + + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", + _process_batch, + lower_bound_id, + upper_bound_id, + ) + + progress["lower_bound_id"] = upper_bound_id + + await self.db_pool.runInteraction( + "populate_full_user_id_profiles", + self.db_pool.updates._background_update_progress_txn, + "populate_full_user_id_profiles", + progress, + ) + + return 50 + async def get_profileinfo(self, user_localpart: str) -> ProfileInfo: try: profile = await self.db_pool.simple_select_one( diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 1672976209..df2cc31ca6 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 76 # remember to update the list below when updating +SCHEMA_VERSION = 77 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -100,13 +100,19 @@ Changes in SCHEMA_VERSION = 75: Changes in SCHEMA_VERSION = 76: - Adds a full_user_id column to tables profiles and user_filters. + +Changes in SCHEMA_VERSION = 77 + - (Postgres) Add NOT VALID CHECK (full_user_id IS NOT NULL) to tables profiles and user_filters """ SCHEMA_COMPAT_VERSION = ( # Queries against `event_stream_ordering` columns in membership tables must # be disambiguated. - 74 + # + # insertions to the column `full_user_id` of tables profiles and user_filters can no + # longer be null + 76 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres new file mode 100644 index 0000000000..3eb226c648 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres new file mode 100644 index 0000000000..ba037daf47 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql new file mode 100644 index 0000000000..12101ab914 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7703, 'populate_full_user_id_profiles', '{}'); \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql new file mode 100644 index 0000000000..1f4d683cac --- /dev/null +++ b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7704, 'populate_full_user_id_user_filters', '{}'); \ No newline at end of file diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 6ec34997ea..f9cf0fcb82 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -14,6 +14,8 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine from synapse.types import UserID from synapse.util import Clock @@ -69,3 +71,64 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertIsNone( self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart)) ) + + def test_profiles_bg_migration(self) -> None: + """ + Test background job that copies entries from column user_id to full_user_id, adding + the hostname in the process. + """ + updater = self.hs.get_datastores().main.db_pool.updates + + # drop the constraint so we can insert nulls in full_user_id to populate the test + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + for i in range(0, 70): + self.get_success( + self.store.db_pool.simple_insert( + "profiles", + {"user_id": f"hello{i:02}"}, + ) + ) + + # re-add the constraint so that when it's validated it actually exists + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={ + "update_name": "populate_full_user_id_profiles", + "progress_json": "{}", + }, + ) + ) + + self.get_success( + updater.run_background_updates(False), + ) + + expected_values = [] + for i in range(0, 70): + expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) + + res = self.get_success( + self.store.db_pool.execute( + "", None, "SELECT full_user_id from profiles ORDER BY full_user_id" + ) + ) + self.assertEqual(len(res), len(expected_values)) + self.assertEqual(res, expected_values) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py new file mode 100644 index 0000000000..bab802f56e --- /dev/null +++ b/tests/storage/test_user_filters.py @@ -0,0 +1,94 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine +from synapse.util import Clock + +from tests import unittest + + +class UserFiltersStoreTestCase(unittest.HomeserverTestCase): + """ + Test background migration that copies entries from column user_id to full_user_id, adding + the hostname in the process. + """ + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_bg_migration(self) -> None: + updater = self.hs.get_datastores().main.db_pool.updates + + # drop the constraint so we can insert nulls in full_user_id to populate the test + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + for i in range(0, 70): + self.get_success( + self.store.db_pool.simple_insert( + "user_filters", + { + "user_id": f"hello{i:02}", + "filter_id": i, + "filter_json": bytearray(i), + }, + ) + ) + + # re-add the constraint so that when it's validated it actually exists + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={ + "update_name": "populate_full_user_id_user_filters", + "progress_json": "{}", + }, + ) + ) + + self.get_success( + updater.run_background_updates(False), + ) + + expected_values = [] + for i in range(0, 70): + expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) + + res = self.get_success( + self.store.db_pool.execute( + "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" + ) + ) + self.assertEqual(len(res), len(expected_values)) + self.assertEqual(res, expected_values) -- cgit 1.5.1 From 7148c2a0d6310c8e97a4170cd4241c1cd4b0b037 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 May 2023 13:27:47 -0500 Subject: Run mypy type checking with the minimum supported Python version (#15602) We use the oldest Python version because later Python versions can include overloads that don't work on the older versions we still support. We're using Python 3.8 instead of 3.7, our actual minimum supported version, because 3.7's EOL is only a matter of weeks away, so we can avoid the extra effort. And in any case, minimum Python 3.8 support is better than winging it on Python 3.11. --- changelog.d/15602.misc | 1 + mypy.ini | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/15602.misc diff --git a/changelog.d/15602.misc b/changelog.d/15602.misc new file mode 100644 index 0000000000..cdd0c039bd --- /dev/null +++ b/changelog.d/15602.misc @@ -0,0 +1 @@ +Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible.
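Pinning `python_version` in the hunk below makes mypy resolve the standard library against Python 3.8's stubs and reject newer-only syntax, so code that only works on later interpreters is flagged even when CI itself runs a newer Python. Two hypothetical examples of the kind of usage this catches (neither is taken from the Synapse codebase):

    import functools

    @functools.cache  # flagged with python_version = 3.8: functools.cache arrived in 3.9
    def answer() -> int:
        return 42

    def is_scalar(x: object) -> bool:
        # flagged with python_version = 3.8: `int | str` as a runtime
        # expression requires Python 3.10 (PEP 604)
        return isinstance(x, int | str)

Both snippets type-check cleanly when targeting Python 3.11 but fail at runtime on 3.8, which is exactly the class of regression this setting is meant to catch before it ships.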
diff --git a/mypy.ini b/mypy.ini index 5e7057cfb7..3363c6daee 100644 --- a/mypy.ini +++ b/mypy.ini @@ -13,6 +13,9 @@ no_implicit_optional = True disallow_untyped_defs = True strict_equality = True warn_redundant_casts = True +# Run mypy type checking with the minimum supported Python version to catch new usage +# that isn't backwards-compatible (types, overloads, etc). +python_version = 3.8 files = docker/, -- cgit 1.5.1 From 375b0a8a119bb925ca280f050a25a931662fcbb5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 16 May 2023 15:56:38 -0400 Subject: Update code to refer to "workers". (#15606) A bunch of comments and variables are out of date and use obsolete terms. --- changelog.d/15606.misc | 1 + docs/replication.md | 6 - synapse/app/admin_cmd.py | 4 +- synapse/app/generic_worker.py | 4 +- synapse/module_api/__init__.py | 6 +- synapse/replication/tcp/client.py | 4 +- synapse/storage/databases/main/account_data.py | 7 +- synapse/storage/databases/main/cache.py | 14 +- synapse/storage/databases/main/devices.py | 2 - synapse/storage/databases/main/events_worker.py | 7 +- synapse/storage/databases/main/receipts.py | 7 +- .../storage/schema/main/delta/34/cache_stream.py | 2 +- tests/app/test_openid_listener.py | 2 +- tests/replication/slave/__init__.py | 13 - tests/replication/slave/storage/__init__.py | 13 - tests/replication/slave/storage/_base.py | 72 ---- tests/replication/slave/storage/test_events.py | 420 --------------------- tests/replication/storage/__init__.py | 13 + tests/replication/storage/_base.py | 72 ++++ tests/replication/storage/test_events.py | 420 +++++++++++++++++++++ 20 files changed, 529 insertions(+), 560 deletions(-) create mode 100644 changelog.d/15606.misc delete mode 100644 tests/replication/slave/__init__.py delete mode 100644 tests/replication/slave/storage/__init__.py delete mode 100644 tests/replication/slave/storage/_base.py delete mode 100644 tests/replication/slave/storage/test_events.py create mode 100644 tests/replication/storage/__init__.py create mode 100644 tests/replication/storage/_base.py create mode 100644 tests/replication/storage/test_events.py diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc new file mode 100644 index 0000000000..44265fbf02 --- /dev/null +++ b/changelog.d/15606.misc @@ -0,0 +1 @@ +Update internal terminology for workers. diff --git a/docs/replication.md b/docs/replication.md index 108da9a065..25145daaf5 100644 --- a/docs/replication.md +++ b/docs/replication.md @@ -30,12 +30,6 @@ minimal. See [the TCP replication documentation](tcp_replication.md). -### The Slaved DataStore - -There are read-only version of the synapse storage layer in -`synapse/replication/slave/storage` that use the response of the -replication API to invalidate their caches. 
- ### The TCP Replication Module Information about how the tcp replication module is structured, including how the classes interact, can be found in diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index b05fe2c589..f9aada269a 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -64,7 +64,7 @@ from synapse.util.logcontext import LoggingContext logger = logging.getLogger("synapse.app.admin_cmd") -class AdminCmdSlavedStore( +class AdminCmdStore( FilteringWorkerStore, ClientIpWorkerStore, DeviceWorkerStore, @@ -103,7 +103,7 @@ class AdminCmdSlavedStore( class AdminCmdServer(HomeServer): - DATASTORE_CLASS = AdminCmdSlavedStore # type: ignore + DATASTORE_CLASS = AdminCmdStore # type: ignore async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None: diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index e17ce35b8e..909ebccf78 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -102,7 +102,7 @@ from synapse.util.httpresourcetree import create_resource_tree logger = logging.getLogger("synapse.app.generic_worker") -class GenericWorkerSlavedStore( +class GenericWorkerStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, @@ -154,7 +154,7 @@ class GenericWorkerSlavedStore( class GenericWorkerServer(HomeServer): - DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore + DATASTORE_CLASS = GenericWorkerStore # type: ignore def _listen_http(self, listener_config: ListenerConfig) -> None: assert listener_config.http_options is not None diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 2c9d181acf..0e9f366cba 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -134,7 +134,7 @@ from synapse.util.caches.descriptors import CachedFunction, cached as _cached from synapse.util.frozenutils import freeze if TYPE_CHECKING: - from synapse.app.generic_worker import GenericWorkerSlavedStore + from synapse.app.generic_worker import GenericWorkerStore from synapse.server import HomeServer @@ -237,9 +237,7 @@ class ModuleApi: # TODO: Fix this type hint once the types for the data stores have been ironed # out. - self._store: Union[ - DataStore, "GenericWorkerSlavedStore" - ] = hs.get_datastores().main + self._store: Union[DataStore, "GenericWorkerStore"] = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self._auth = hs.get_auth() self._auth_handler = auth_handler diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 200f667fdf..139f57cf86 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -60,7 +60,7 @@ _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5 class ReplicationDataHandler: """Handles incoming stream updates from replication. - This instance notifies the slave data store about updates. Can be subclassed + This instance notifies the data store about updates. Can be subclassed to handle updates in additional ways. """ @@ -91,7 +91,7 @@ class ReplicationDataHandler: ) -> None: """Called to handle a batch of replication data with a given stream token. - By default this just pokes the slave store. Can be overridden in subclasses to + By default, this just pokes the data store. Can be overridden in subclasses to handle more. 
Args: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index a9843f6e17..8f7bdbc61a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -85,13 +85,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) writers=hs.config.worker.writers.account_data, ) else: + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). self._account_data_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index bd07d20171..46fa0a73f9 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -274,11 +274,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): async def invalidate_cache_and_stream( self, cache_name: str, keys: Tuple[Any, ...] ) -> None: - """Invalidates the cache and adds it to the cache stream so slaves + """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. - This should only be used to invalidate caches where slaves won't - otherwise know from other replication streams that the cache should + This should only be used to invalidate caches where other workers won't + otherwise have known from other replication streams that the cache should be invalidated. """ cache_func = getattr(self, cache_name, None) @@ -297,11 +297,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): cache_func: CachedFunction, keys: Tuple[Any, ...], ) -> None: - """Invalidates the cache and adds it to the cache stream so slaves + """Invalidates the cache and adds it to the cache stream so other workers will know to invalidate their caches. - This should only be used to invalidate caches where slaves won't - otherwise know from other replication streams that the cache should + This should only be used to invalidate caches where other workers won't + otherwise have known from other replication streams that the cache should be invalidated. """ txn.call_after(cache_func.invalidate, keys) @@ -310,7 +310,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): def _invalidate_all_cache_and_stream( self, txn: LoggingTransaction, cache_func: CachedFunction ) -> None: - """Invalidates the entire cache and adds it to the cache stream so slaves + """Invalidates the entire cache and adds it to the cache stream so other workers will know to invalidate their caches. """ diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 5503621ad6..a67fdb3c22 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -105,8 +105,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): is_writer=hs.config.worker.worker_app is None, ) - # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a - # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). 
device_list_max = self._device_list_id_gen.get_current_token() device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict( db_conn, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0ff3fc7369..53aa5933d5 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -213,13 +213,10 @@ class EventsWorkerStore(SQLBaseStore): writers=hs.config.worker.writers.events, ) else: + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). self._stream_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 074942b167..5ee5c7ad9f 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -85,13 +85,10 @@ class ReceiptsWorkerStore(SQLBaseStore): else: self._can_write_to_receipts = True + # Multiple writers are not supported for SQLite. + # # We shouldn't be running in worker mode with SQLite, but its useful # to support it for unit tests. - # - # If this process is the writer than we need to use - # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets - # updated over replication. (Multiple writers are not supported for - # SQLite). self._receipts_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), diff --git a/synapse/storage/schema/main/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py index 682c86da1a..882f9b893b 100644 --- a/synapse/storage/schema/main/delta/34/cache_stream.py +++ b/synapse/storage/schema/main/delta/34/cache_stream.py @@ -21,7 +21,7 @@ from synapse.storage.prepare_database import get_statements logger = logging.getLogger(__name__) -# This stream is used to notify replication slaves that some caches have +# This stream is used to notify workers over replication that some caches have # been invalidated that they cannot infer from the other streams. CREATE_TABLE = """ CREATE TABLE cache_invalidation_stream ( diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 2ee343d8a4..6e0413400e 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -38,7 +38,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): def default_config(self) -> JsonDict: conf = super().default_config() - # we're using FederationReaderServer, which uses a SlavedStore, so we + # we're using GenericWorkerServer, which uses a GenericWorkerStore, so we # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. conf["worker_app"] = "yes" diff --git a/tests/replication/slave/__init__.py b/tests/replication/slave/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/tests/replication/slave/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/replication/slave/storage/__init__.py b/tests/replication/slave/storage/__init__.py deleted file mode 100644 index f43a360a80..0000000000 --- a/tests/replication/slave/storage/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py deleted file mode 100644 index 4c9b494344..0000000000 --- a/tests/replication/slave/storage/_base.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Any, Iterable, Optional -from unittest.mock import Mock - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.server import HomeServer -from synapse.util import Clock - -from tests.replication._base import BaseStreamTestCase - - -class BaseSlavedStoreTestCase(BaseStreamTestCase): - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - return self.setup_test_homeserver(federation_client=Mock()) - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - super().prepare(reactor, clock, hs) - - self.reconnect() - - self.master_store = hs.get_datastores().main - self.slaved_store = self.worker_hs.get_datastores().main - persistence = hs.get_storage_controllers().persistence - assert persistence is not None - self.persistance = persistence - - def replicate(self) -> None: - """Tell the master side of replication that something has happened, and then - wait for the replication to occur. 
- """ - self.streamer.on_notifier_poke() - self.pump(0.1) - - def check( - self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None - ) -> None: - master_result = self.get_success(getattr(self.master_store, method)(*args)) - slaved_result = self.get_success(getattr(self.slaved_store, method)(*args)) - if expected_result is not None: - self.assertEqual( - master_result, - expected_result, - "Expected master result to be %r but was %r" - % (expected_result, master_result), - ) - self.assertEqual( - slaved_result, - expected_result, - "Expected slave result to be %r but was %r" - % (expected_result, slaved_result), - ) - self.assertEqual( - master_result, - slaved_result, - "Slave result %r does not match master result %r" - % (slaved_result, master_result), - ) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py deleted file mode 100644 index b2125b1fea..0000000000 --- a/tests/replication/slave/storage/test_events.py +++ /dev/null @@ -1,420 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import logging -from typing import Any, Callable, Iterable, List, Optional, Tuple - -from canonicaljson import encode_canonical_json -from parameterized import parameterized - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.api.constants import ReceiptTypes -from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict -from synapse.events.snapshot import EventContext -from synapse.handlers.room import RoomEventSource -from synapse.server import HomeServer -from synapse.storage.databases.main.event_push_actions import ( - NotifCounts, - RoomNotifCounts, -) -from synapse.storage.databases.main.events_worker import EventsWorkerStore -from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser -from synapse.types import PersistedEventPosition -from synapse.util import Clock - -from tests.server import FakeTransport - -from ._base import BaseSlavedStoreTestCase - -USER_ID = "@feeling:test" -USER_ID_2 = "@bright:test" -OUTLIER = {"outlier": True} -ROOM_ID = "!room:test" - -logger = logging.getLogger(__name__) - - -def dict_equals(self: EventBase, other: EventBase) -> bool: - me = encode_canonical_json(self.get_pdu_json()) - them = encode_canonical_json(other.get_pdu_json()) - return me == them - - -def patch__eq__(cls: object) -> Callable[[], None]: - eq = getattr(cls, "__eq__", None) - cls.__eq__ = dict_equals # type: ignore[assignment] - - def unpatch() -> None: - if eq is not None: - cls.__eq__ = eq # type: ignore[assignment] - - return unpatch - - -class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): - STORE_TYPE = EventsWorkerStore - - def setUp(self) -> None: - # Patch up the equality operator for events so that we can check - # whether lists of events match using assertEqual - self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)] - 
super().setUp() - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - super().prepare(reactor, clock, hs) - - self.get_success( - self.master_store.store_room( - ROOM_ID, - USER_ID, - is_public=False, - room_version=RoomVersions.V1, - ) - ) - - def tearDown(self) -> None: - [unpatch() for unpatch in self.unpatches] - - def test_get_latest_event_ids_in_room(self) -> None: - create = self.persist(type="m.room.create", key="", creator=USER_ID) - self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) - - join = self.persist( - type="m.room.member", - key=USER_ID, - membership="join", - prev_events=[(create.event_id, {})], - ) - self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) - - def test_redactions(self) -> None: - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - - msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") - self.replicate() - self.check("get_event", [msg.event_id], msg) - - redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) - self.replicate() - - msg_dict = msg.get_dict() - msg_dict["content"] = {} - msg_dict["unsigned"]["redacted_by"] = redaction.event_id - msg_dict["unsigned"]["redacted_because"] = redaction - redacted = make_event_from_dict( - msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() - ) - self.check("get_event", [msg.event_id], redacted) - - def test_backfilled_redactions(self) -> None: - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - - msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") - self.replicate() - self.check("get_event", [msg.event_id], msg) - - redaction = self.persist( - type="m.room.redaction", redacts=msg.event_id, backfill=True - ) - self.replicate() - - msg_dict = msg.get_dict() - msg_dict["content"] = {} - msg_dict["unsigned"]["redacted_by"] = redaction.event_id - msg_dict["unsigned"]["redacted_because"] = redaction - redacted = make_event_from_dict( - msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() - ) - self.check("get_event", [msg.event_id], redacted) - - def test_invites(self) -> None: - self.persist(type="m.room.create", key="", creator=USER_ID) - self.check("get_invited_rooms_for_local_user", [USER_ID_2], []) - event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") - assert event.internal_metadata.stream_ordering is not None - - self.replicate() - - self.check( - "get_invited_rooms_for_local_user", - [USER_ID_2], - [ - RoomsForUser( - ROOM_ID, - USER_ID, - "invite", - event.event_id, - event.internal_metadata.stream_ordering, - RoomVersions.V1.identifier, - ) - ], - ) - - @parameterized.expand([(True,), (False,)]) - def test_push_actions_for_user(self, send_receipt: bool) -> None: - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - self.persist( - type="m.room.member", sender=USER_ID, key=USER_ID_2, membership="join" - ) - event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello") - self.replicate() - - if send_receipt: - self.get_success( - self.master_store.insert_receipt( - ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], None, {} - ) - ) - - self.check( - "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2], - 
RoomNotifCounts( - NotifCounts(highlight_count=0, unread_count=0, notify_count=0), {} - ), - ) - - self.persist( - type="m.room.message", - msgtype="m.text", - body="world", - push_actions=[(USER_ID_2, ["notify"])], - ) - self.replicate() - self.check( - "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2], - RoomNotifCounts( - NotifCounts(highlight_count=0, unread_count=0, notify_count=1), {} - ), - ) - - self.persist( - type="m.room.message", - msgtype="m.text", - body="world", - push_actions=[ - (USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}]) - ], - ) - self.replicate() - self.check( - "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2], - RoomNotifCounts( - NotifCounts(highlight_count=1, unread_count=0, notify_count=2), {} - ), - ) - - def test_get_rooms_for_user_with_stream_ordering(self) -> None: - """Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated - by rows in the events stream - """ - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - self.replicate() - self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) - - j2 = self.persist( - type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" - ) - assert j2.internal_metadata.stream_ordering is not None - self.replicate() - - expected_pos = PersistedEventPosition( - "master", j2.internal_metadata.stream_ordering - ) - self.check( - "get_rooms_for_user_with_stream_ordering", - (USER_ID_2,), - {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, - ) - - def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist( - self, - ) -> None: - """Check that current_state invalidation happens correctly with multiple events - in the persistence batch. - - This test attempts to reproduce a race condition between the event persistence - loop and a worker-based Sync handler. - - The problem occurred when the master persisted several events in one batch. It - only updates the current_state at the end of each batch, so the obvious thing - to do is then to issue a current_state_delta stream update corresponding to the - last stream_id in the batch. - - However, that raises the possibility that a worker will see the replication - notification for a join event before the current_state caches are invalidated. - - The test involves: - * creating a join and a message event for a user, and persisting them in the - same batch - - * controlling the replication stream so that updates are sent gradually - - * between each bunch of replication updates, check that we see a consistent - snapshot of the state. - """ - self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.member", key=USER_ID, membership="join") - self.replicate() - self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) - - # limit the replication rate - repl_transport = self._server_transport - assert isinstance(repl_transport, FakeTransport) - repl_transport.autoflush = False - - # build the join and message events and persist them in the same batch. 
- logger.info("----- build test events ------") - j2, j2ctx = self.build_event( - type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" - ) - msg, msgctx = self.build_event() - self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)])) - self.replicate() - assert j2.internal_metadata.stream_ordering is not None - - event_source = RoomEventSource(self.hs) - event_source.store = self.slaved_store - current_token = event_source.get_current_key() - - # gradually stream out the replication - while repl_transport.buffer: - logger.info("------ flush ------") - repl_transport.flush(30) - self.pump(0) - - prev_token = current_token - current_token = event_source.get_current_key() - - # attempt to replicate the behaviour of the sync handler. - # - # First, we get a list of the rooms we are joined to - joined_rooms = self.get_success( - self.slaved_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) - ) - - # Then, we get a list of the events since the last sync - membership_changes = self.get_success( - self.slaved_store.get_membership_changes_for_user( - USER_ID_2, prev_token, current_token - ) - ) - - logger.info( - "%s->%s: joined_rooms=%r membership_changes=%r", - prev_token, - current_token, - joined_rooms, - membership_changes, - ) - - # the membership change is only any use to us if the room is in the - # joined_rooms list. - if membership_changes: - expected_pos = PersistedEventPosition( - "master", j2.internal_metadata.stream_ordering - ) - self.assertEqual( - joined_rooms, - {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, - ) - - event_id = 0 - - def persist(self, backfill: bool = False, **kwargs: Any) -> EventBase: - """ - Returns: - The event that was persisted. - """ - event, context = self.build_event(**kwargs) - - if backfill: - self.get_success( - self.persistance.persist_events([(event, context)], backfilled=True) - ) - else: - self.get_success(self.persistance.persist_event(event, context)) - - return event - - def build_event( - self, - sender: str = USER_ID, - room_id: str = ROOM_ID, - type: str = "m.room.message", - key: Optional[str] = None, - internal: Optional[dict] = None, - depth: Optional[int] = None, - prev_events: Optional[List[Tuple[str, dict]]] = None, - auth_events: Optional[List[str]] = None, - prev_state: Optional[List[str]] = None, - redacts: Optional[str] = None, - push_actions: Iterable = frozenset(), - **content: object, - ) -> Tuple[EventBase, EventContext]: - prev_events = prev_events or [] - auth_events = auth_events or [] - prev_state = prev_state or [] - - if depth is None: - depth = self.event_id - - if not prev_events: - latest_event_ids = self.get_success( - self.master_store.get_latest_event_ids_in_room(room_id) - ) - prev_events = [(ev_id, {}) for ev_id in latest_event_ids] - - event_dict = { - "sender": sender, - "type": type, - "content": content, - "event_id": "$%d:blue" % (self.event_id,), - "room_id": room_id, - "depth": depth, - "origin_server_ts": self.event_id, - "prev_events": prev_events, - "auth_events": auth_events, - } - if key is not None: - event_dict["state_key"] = key - event_dict["prev_state"] = prev_state - - if redacts is not None: - event_dict["redacts"] = redacts - - event = make_event_from_dict(event_dict, internal_metadata_dict=internal or {}) - - self.event_id += 1 - state_handler = self.hs.get_state_handler() - context = self.get_success(state_handler.compute_event_context(event)) - - self.get_success( - self.master_store.add_push_actions_to_staging( - event.event_id, - 
dict(push_actions), - False, - "main", - ) - ) - return event, context diff --git a/tests/replication/storage/__init__.py b/tests/replication/storage/__init__.py new file mode 100644 index 0000000000..f43a360a80 --- /dev/null +++ b/tests/replication/storage/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/replication/storage/_base.py b/tests/replication/storage/_base.py new file mode 100644 index 0000000000..de26a62ae1 --- /dev/null +++ b/tests/replication/storage/_base.py @@ -0,0 +1,72 @@ +# Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Iterable, Optional +from unittest.mock import Mock + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.replication._base import BaseStreamTestCase + + +class BaseWorkerStoreTestCase(BaseStreamTestCase): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + return self.setup_test_homeserver(federation_client=Mock()) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.reconnect() + + self.master_store = hs.get_datastores().main + self.worker_store = self.worker_hs.get_datastores().main + persistence = hs.get_storage_controllers().persistence + assert persistence is not None + self.persistance = persistence + + def replicate(self) -> None: + """Tell the master side of replication that something has happened, and then + wait for the replication to occur. 
+ """ + self.streamer.on_notifier_poke() + self.pump(0.1) + + def check( + self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None + ) -> None: + master_result = self.get_success(getattr(self.master_store, method)(*args)) + worker_result = self.get_success(getattr(self.worker_store, method)(*args)) + if expected_result is not None: + self.assertEqual( + master_result, + expected_result, + "Expected master result to be %r but was %r" + % (expected_result, master_result), + ) + self.assertEqual( + worker_result, + expected_result, + "Expected worker result to be %r but was %r" + % (expected_result, worker_result), + ) + self.assertEqual( + master_result, + worker_result, + "Worker result %r does not match master result %r" + % (worker_result, master_result), + ) diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py new file mode 100644 index 0000000000..f7c6417a09 --- /dev/null +++ b/tests/replication/storage/test_events.py @@ -0,0 +1,420 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import Any, Callable, Iterable, List, Optional, Tuple + +from canonicaljson import encode_canonical_json +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import ReceiptTypes +from synapse.api.room_versions import RoomVersions +from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict +from synapse.events.snapshot import EventContext +from synapse.handlers.room import RoomEventSource +from synapse.server import HomeServer +from synapse.storage.databases.main.event_push_actions import ( + NotifCounts, + RoomNotifCounts, +) +from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser +from synapse.types import PersistedEventPosition +from synapse.util import Clock + +from tests.server import FakeTransport + +from ._base import BaseWorkerStoreTestCase + +USER_ID = "@feeling:test" +USER_ID_2 = "@bright:test" +OUTLIER = {"outlier": True} +ROOM_ID = "!room:test" + +logger = logging.getLogger(__name__) + + +def dict_equals(self: EventBase, other: EventBase) -> bool: + me = encode_canonical_json(self.get_pdu_json()) + them = encode_canonical_json(other.get_pdu_json()) + return me == them + + +def patch__eq__(cls: object) -> Callable[[], None]: + eq = getattr(cls, "__eq__", None) + cls.__eq__ = dict_equals # type: ignore[assignment] + + def unpatch() -> None: + if eq is not None: + cls.__eq__ = eq # type: ignore[assignment] + + return unpatch + + +class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): + STORE_TYPE = EventsWorkerStore + + def setUp(self) -> None: + # Patch up the equality operator for events so that we can check + # whether lists of events match using assertEqual + self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)] + super().setUp() + + def 
prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.get_success( + self.master_store.store_room( + ROOM_ID, + USER_ID, + is_public=False, + room_version=RoomVersions.V1, + ) + ) + + def tearDown(self) -> None: + [unpatch() for unpatch in self.unpatches] + + def test_get_latest_event_ids_in_room(self) -> None: + create = self.persist(type="m.room.create", key="", creator=USER_ID) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) + + join = self.persist( + type="m.room.member", + key=USER_ID, + membership="join", + prev_events=[(create.event_id, {})], + ) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) + + def test_redactions(self) -> None: + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") + + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) + + redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) + self.replicate() + + msg_dict = msg.get_dict() + msg_dict["content"] = {} + msg_dict["unsigned"]["redacted_by"] = redaction.event_id + msg_dict["unsigned"]["redacted_because"] = redaction + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) + self.check("get_event", [msg.event_id], redacted) + + def test_backfilled_redactions(self) -> None: + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") + + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) + + redaction = self.persist( + type="m.room.redaction", redacts=msg.event_id, backfill=True + ) + self.replicate() + + msg_dict = msg.get_dict() + msg_dict["content"] = {} + msg_dict["unsigned"]["redacted_by"] = redaction.event_id + msg_dict["unsigned"]["redacted_because"] = redaction + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) + self.check("get_event", [msg.event_id], redacted) + + def test_invites(self) -> None: + self.persist(type="m.room.create", key="", creator=USER_ID) + self.check("get_invited_rooms_for_local_user", [USER_ID_2], []) + event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") + assert event.internal_metadata.stream_ordering is not None + + self.replicate() + + self.check( + "get_invited_rooms_for_local_user", + [USER_ID_2], + [ + RoomsForUser( + ROOM_ID, + USER_ID, + "invite", + event.event_id, + event.internal_metadata.stream_ordering, + RoomVersions.V1.identifier, + ) + ], + ) + + @parameterized.expand([(True,), (False,)]) + def test_push_actions_for_user(self, send_receipt: bool) -> None: + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") + self.persist( + type="m.room.member", sender=USER_ID, key=USER_ID_2, membership="join" + ) + event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello") + self.replicate() + + if send_receipt: + self.get_success( + self.master_store.insert_receipt( + ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], None, {} + ) + ) + + self.check( + "get_unread_event_push_actions_by_room_for_user", + [ROOM_ID, USER_ID_2], + RoomNotifCounts( + 
NotifCounts(highlight_count=0, unread_count=0, notify_count=0), {} + ), + ) + + self.persist( + type="m.room.message", + msgtype="m.text", + body="world", + push_actions=[(USER_ID_2, ["notify"])], + ) + self.replicate() + self.check( + "get_unread_event_push_actions_by_room_for_user", + [ROOM_ID, USER_ID_2], + RoomNotifCounts( + NotifCounts(highlight_count=0, unread_count=0, notify_count=1), {} + ), + ) + + self.persist( + type="m.room.message", + msgtype="m.text", + body="world", + push_actions=[ + (USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}]) + ], + ) + self.replicate() + self.check( + "get_unread_event_push_actions_by_room_for_user", + [ROOM_ID, USER_ID_2], + RoomNotifCounts( + NotifCounts(highlight_count=1, unread_count=0, notify_count=2), {} + ), + ) + + def test_get_rooms_for_user_with_stream_ordering(self) -> None: + """Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated + by rows in the events stream + """ + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") + self.replicate() + self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) + + j2 = self.persist( + type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" + ) + assert j2.internal_metadata.stream_ordering is not None + self.replicate() + + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering + ) + self.check( + "get_rooms_for_user_with_stream_ordering", + (USER_ID_2,), + {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, + ) + + def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist( + self, + ) -> None: + """Check that current_state invalidation happens correctly with multiple events + in the persistence batch. + + This test attempts to reproduce a race condition between the event persistence + loop and a worker-based Sync handler. + + The problem occurred when the master persisted several events in one batch. It + only updates the current_state at the end of each batch, so the obvious thing + to do is then to issue a current_state_delta stream update corresponding to the + last stream_id in the batch. + + However, that raises the possibility that a worker will see the replication + notification for a join event before the current_state caches are invalidated. + + The test involves: + * creating a join and a message event for a user, and persisting them in the + same batch + + * controlling the replication stream so that updates are sent gradually + + * between each bunch of replication updates, check that we see a consistent + snapshot of the state. + """ + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") + self.replicate() + self.check("get_rooms_for_user_with_stream_ordering", (USER_ID_2,), set()) + + # limit the replication rate + repl_transport = self._server_transport + assert isinstance(repl_transport, FakeTransport) + repl_transport.autoflush = False + + # build the join and message events and persist them in the same batch. 
+ logger.info("----- build test events ------") + j2, j2ctx = self.build_event( + type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" + ) + msg, msgctx = self.build_event() + self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)])) + self.replicate() + assert j2.internal_metadata.stream_ordering is not None + + event_source = RoomEventSource(self.hs) + event_source.store = self.worker_store + current_token = event_source.get_current_key() + + # gradually stream out the replication + while repl_transport.buffer: + logger.info("------ flush ------") + repl_transport.flush(30) + self.pump(0) + + prev_token = current_token + current_token = event_source.get_current_key() + + # attempt to replicate the behaviour of the sync handler. + # + # First, we get a list of the rooms we are joined to + joined_rooms = self.get_success( + self.worker_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) + ) + + # Then, we get a list of the events since the last sync + membership_changes = self.get_success( + self.worker_store.get_membership_changes_for_user( + USER_ID_2, prev_token, current_token + ) + ) + + logger.info( + "%s->%s: joined_rooms=%r membership_changes=%r", + prev_token, + current_token, + joined_rooms, + membership_changes, + ) + + # the membership change is only any use to us if the room is in the + # joined_rooms list. + if membership_changes: + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering + ) + self.assertEqual( + joined_rooms, + {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, + ) + + event_id = 0 + + def persist(self, backfill: bool = False, **kwargs: Any) -> EventBase: + """ + Returns: + The event that was persisted. + """ + event, context = self.build_event(**kwargs) + + if backfill: + self.get_success( + self.persistance.persist_events([(event, context)], backfilled=True) + ) + else: + self.get_success(self.persistance.persist_event(event, context)) + + return event + + def build_event( + self, + sender: str = USER_ID, + room_id: str = ROOM_ID, + type: str = "m.room.message", + key: Optional[str] = None, + internal: Optional[dict] = None, + depth: Optional[int] = None, + prev_events: Optional[List[Tuple[str, dict]]] = None, + auth_events: Optional[List[str]] = None, + prev_state: Optional[List[str]] = None, + redacts: Optional[str] = None, + push_actions: Iterable = frozenset(), + **content: object, + ) -> Tuple[EventBase, EventContext]: + prev_events = prev_events or [] + auth_events = auth_events or [] + prev_state = prev_state or [] + + if depth is None: + depth = self.event_id + + if not prev_events: + latest_event_ids = self.get_success( + self.master_store.get_latest_event_ids_in_room(room_id) + ) + prev_events = [(ev_id, {}) for ev_id in latest_event_ids] + + event_dict = { + "sender": sender, + "type": type, + "content": content, + "event_id": "$%d:blue" % (self.event_id,), + "room_id": room_id, + "depth": depth, + "origin_server_ts": self.event_id, + "prev_events": prev_events, + "auth_events": auth_events, + } + if key is not None: + event_dict["state_key"] = key + event_dict["prev_state"] = prev_state + + if redacts is not None: + event_dict["redacts"] = redacts + + event = make_event_from_dict(event_dict, internal_metadata_dict=internal or {}) + + self.event_id += 1 + state_handler = self.hs.get_state_handler() + context = self.get_success(state_handler.compute_event_context(event)) + + self.get_success( + self.master_store.add_push_actions_to_staging( + event.event_id, + 
dict(push_actions), + False, + "main", + ) + ) + return event, context -- cgit 1.5.1 From 4ee82c0576baed6358e3818e8c22e01bde6afd02 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 16 May 2023 16:25:01 -0400 Subject: Apply url_preview_url_blacklist to oEmbed and pre-cached images (#15601) There are two situations which were previously not properly checked: 1. If the requested URL was replaced with an oEmbed URL, then the oEmbed URL was not checked against url_preview_url_blacklist. 2. Follow-up URLs (either via autodiscovery of oEmbed or to pre-cache images) were not checked against url_preview_url_blacklist. --- changelog.d/15601.bugfix | 1 + synapse/media/url_previewer.py | 121 +++++++++++++--------- tests/media/test_url_previewer.py | 113 ++++++++++++++++++++ tests/rest/media/test_url_preview.py | 194 ++++++++++++++++++++++++++++++++++- 4 files changed, 379 insertions(+), 50 deletions(-) create mode 100644 changelog.d/15601.bugfix create mode 100644 tests/media/test_url_previewer.py diff --git a/changelog.d/15601.bugfix b/changelog.d/15601.bugfix new file mode 100644 index 0000000000..426db6cea3 --- /dev/null +++ b/changelog.d/15601.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index c8a4a809f1..dbdb1fd20e 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -113,7 +113,7 @@ class UrlPreviewer: 1. Checks URL and timestamp against the database cache and returns the result if it has not expired and was successful (a 2xx return code). 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it - does, update the URL to download. + does and the new URL is not blocked, update the URL to download. 3. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 4. If the media is an image: @@ -127,14 +127,14 @@ class UrlPreviewer: and saves the local media metadata. 2. Convert the oEmbed response to an Open Graph response. 3. Override any Open Graph data from the HTML with data from oEmbed. - 4. If an image exists in the Open Graph response: + 4. If an image URL exists in the Open Graph response: 1. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 2. Generates thumbnails. 3. Updates the Open Graph response based on image properties. - 6. If the media is JSON and an oEmbed URL was found: + 6. If an oEmbed URL was found and the media is JSON: 1. Convert the oEmbed response to an Open Graph response. - 2. If a thumbnail or image is in the oEmbed response: + 2. If an image URL is in the oEmbed response: 1. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 2. Generates thumbnails. @@ -144,7 +144,8 @@ class UrlPreviewer: If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole - does not fail. As much information as possible is returned. + does not fail. If any of them are blocked, then those additional requests + are skipped. As much information as possible is returned. The in-memory cache expires after 1 hour. @@ -203,48 +204,14 @@ class UrlPreviewer: ) async def preview(self, url: str, user: UserID, ts: int) -> bytes: - # XXX: we could move this into _do_preview if we wanted. 
-        url_tuple = urlsplit(url)
-        for entry in self.url_preview_url_blacklist:
-            match = True
-            for attrib in entry:
-                pattern = entry[attrib]
-                value = getattr(url_tuple, attrib)
-                logger.debug(
-                    "Matching attrib '%s' with value '%s' against pattern '%s'",
-                    attrib,
-                    value,
-                    pattern,
-                )
-
-                if value is None:
-                    match = False
-                    continue
-
-                # Some attributes might not be parsed as strings by urlsplit (such as the
-                # port, which is parsed as an int). Because we use match functions that
-                # expect strings, we want to make sure that's what we give them.
-                value_str = str(value)
-
-                if pattern.startswith("^"):
-                    if not re.match(pattern, value_str):
-                        match = False
-                        continue
-                else:
-                    if not fnmatch.fnmatch(value_str, pattern):
-                        match = False
-                        continue
-            if match:
-                logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
-                raise SynapseError(
-                    403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
-                )
-
         # the in-memory cache:
-        # * ensures that only one request is active at a time
+        # * ensures that only one request to a URL is active at a time
         # * takes load off the DB for the thundering herds
         # * also caches any failures (unlike the DB) so we don't keep
-        # requesting the same endpoint
+        # requesting the same endpoint
+        #
+        # Note that autodiscovered oEmbed URLs and pre-caching of images
+        # are not captured in the in-memory cache.
 
         observable = self._cache.get(url)
 
@@ -283,7 +250,7 @@ class UrlPreviewer:
             og = og.encode("utf8")
             return og
 
-        # If this URL can be accessed via oEmbed, use that instead.
+        # If this URL can be accessed via an allowed oEmbed, use that instead.
         url_to_download = url
         oembed_url = self._oembed.get_oembed_url(url)
         if oembed_url:
@@ -329,6 +296,7 @@ class UrlPreviewer:
         # defer to that.
         oembed_url = self._oembed.autodiscover_from_html(tree)
         og_from_oembed: JsonDict = {}
+        # Only download the oEmbed URL if it is allowed.
         if oembed_url:
             try:
                 oembed_info = await self._handle_url(
@@ -411,6 +379,59 @@ class UrlPreviewer:
 
         return jsonog.encode("utf8")
 
+    def _is_url_blocked(self, url: str) -> bool:
+        """
+        Check whether the URL is allowed to be previewed (according to the homeserver
+        configuration).
+
+        Args:
+            url: The requested URL.
+
+        Returns:
+            True if the URL is blocked, False if it is allowed.
+        """
+        url_tuple = urlsplit(url)
+        for entry in self.url_preview_url_blacklist:
+            match = True
+            # Iterate over each entry. If *all* attributes of that entry match
+            # the current URL, then reject it.
+            for attrib, pattern in entry.items():
+                value = getattr(url_tuple, attrib)
+                logger.debug(
+                    "Matching attrib '%s' with value '%s' against pattern '%s'",
+                    attrib,
+                    value,
+                    pattern,
+                )
+
+                if value is None:
+                    match = False
+                    break
+
+                # Some attributes might not be parsed as strings by urlsplit (such as the
+                # port, which is parsed as an int). Because we use match functions that
+                # expect strings, we want to make sure that's what we give them.
+                value_str = str(value)
+
+                # Check the value against the pattern as either a regular expression or
+                # a glob. If it doesn't match, the entry doesn't match.
+                if pattern.startswith("^"):
+                    if not re.match(pattern, value_str):
+                        match = False
+                        break
+                else:
+                    if not fnmatch.fnmatch(value_str, pattern):
+                        match = False
+                        break
+
+            # All fields matched, return True (the URL is blocked).
+            if match:
+                logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
+                return match
+
+        # No matches were found, the URL is allowed.
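+        # (Illustration, mirroring the new unit tests below: with an entry of
+        # {"scheme": "http", "netloc": "matrix.org"}, http://matrix.org is
+        # blocked but https://matrix.org is not, since every attribute of an
+        # entry has to match before the URL is rejected.)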
+ return False + async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult: """ Fetches a remote URL and parses the headers. @@ -547,8 +568,16 @@ class UrlPreviewer: Returns: A MediaInfo object describing the fetched content. + + Raises: + SynapseError if the URL is blocked. """ + if self._is_url_blocked(url): + raise SynapseError( + 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN + ) + # TODO: we should probably honour robots.txt... except in practice # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? @@ -624,7 +653,7 @@ class UrlPreviewer: return # The image URL from the HTML might be relative to the previewed page, - # convert it to an URL which can be requested directly. + # convert it to a URL which can be requested directly. url_parts = urlparse(image_url) if url_parts.scheme != "data": image_url = urljoin(media_info.uri, image_url) diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py new file mode 100644 index 0000000000..3c4c7d6765 --- /dev/null +++ b/tests/media/test_url_previewer.py @@ -0,0 +1,113 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest +from tests.unittest import override_config + +try: + import lxml +except ImportError: + lxml = None + + +class URLPreviewTests(unittest.HomeserverTestCase): + if not lxml: + skip = "url preview feature requires lxml" + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + config = self.default_config() + config["url_preview_enabled"] = True + config["max_spider_size"] = 9999999 + config["url_preview_ip_range_blacklist"] = ( + "192.168.1.1", + "1.0.0.0/8", + "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "2001:800::/21", + ) + + self.storage_path = self.mktemp() + self.media_store_path = self.mktemp() + os.mkdir(self.storage_path) + os.mkdir(self.media_store_path) + config["media_store_path"] = self.media_store_path + + provider_config = { + "module": "synapse.media.storage_provider.FileStorageProviderBackend", + "store_local": True, + "store_synchronous": False, + "store_remote": True, + "config": {"directory": self.storage_path}, + } + + config["media_storage_providers"] = [provider_config] + + return self.setup_test_homeserver(config=config) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + media_repo_resource = hs.get_media_repository_resource() + preview_url = media_repo_resource.children[b"preview_url"] + self.url_previewer = preview_url._url_previewer + + def test_all_urls_allowed(self) -> None: + self.assertFalse(self.url_previewer._is_url_blocked("http://matrix.org")) + self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org")) + self.assertFalse(self.url_previewer._is_url_blocked("http://localhost:8000")) + 
self.assertFalse(
+            self.url_previewer._is_url_blocked("http://user:pass@matrix.org")
+        )
+
+    @override_config(
+        {
+            "url_preview_url_blacklist": [
+                {"username": "user"},
+                {"scheme": "http", "netloc": "matrix.org"},
+            ]
+        }
+    )
+    def test_blocked_url(self) -> None:
+        # Blocked via scheme and URL.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://matrix.org"))
+        # Not blocked because all components must match.
+        self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org"))
+
+        # Blocked due to the user.
+        self.assertTrue(
+            self.url_previewer._is_url_blocked("http://user:pass@example.com")
+        )
+        self.assertTrue(self.url_previewer._is_url_blocked("http://user@example.com"))
+
+    @override_config({"url_preview_url_blacklist": [{"netloc": "*.example.com"}]})
+    def test_glob_blocked_url(self) -> None:
+        # All subdomains are blocked.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+        self.assertTrue(self.url_previewer._is_url_blocked("http://.example.com"))
+
+        # The TLD is not blocked.
+        self.assertFalse(self.url_previewer._is_url_blocked("https://example.com"))
+
+    @override_config({"url_preview_url_blacklist": [{"netloc": "^.+\\.example\\.com"}]})
+    def test_regex_blocked_url(self) -> None:
+        # All subdomains are blocked.
+        self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+        # Requires a non-empty subdomain.
+        self.assertFalse(self.url_previewer._is_url_blocked("http://.example.com"))
+
+        # The TLD is not blocked.
+        self.assertFalse(self.url_previewer._is_url_blocked("https://example.com"))
diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py
index e44beae8c1..7517155cf3 100644
--- a/tests/rest/media/test_url_preview.py
+++ b/tests/rest/media/test_url_preview.py
@@ -653,6 +653,57 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             server.data,
         )
 
+    def test_image(self) -> None:
+        """An image should be precached if mentioned in the HTML."""
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+        self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]
+
+        result = (
+            b""""""
+        )
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://matrix.org",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        # Respond with the HTML.
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(result),)
+            + result
+        )
+        self.pump()
+
+        # Respond with the photo.
+        client = self.reactor.tcpClients[1][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b"Content-Type: image/png\r\n\r\n"
+            )
+            % (len(SMALL_PNG),)
+            + SMALL_PNG
+        )
+        self.pump()
+
+        # The image should be in the result.
+        self.assertEqual(channel.code, 200)
+        self._assert_small_png(channel.json_body)
+
     def test_nonexistent_image(self) -> None:
         """If the preview image doesn't exist, ensure some data is returned."""
         self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
@@ -683,9 +734,53 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self.pump()
+
+        # There should not be a second connection.
+        self.assertEqual(len(self.reactor.tcpClients), 1)
+
+        # The image should not be in the result.
         self.assertEqual(channel.code, 200)
+        self.assertNotIn("og:image", channel.json_body)
+
+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "cdn.matrix.org"}]}
+    )
+    def test_image_blocked(self) -> None:
+        """The image should not be downloaded if the image URL is blocked."""
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+        self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]
+
+        result = (
+            b""""""
+        )
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://matrix.org",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(result),)
+            + result
+        )
+        self.pump()
+
+        # There should not be a second connection.
+        self.assertEqual(len(self.reactor.tcpClients), 1)
 
         # The image should not be in the result.
+        self.assertEqual(channel.code, 200)
         self.assertNotIn("og:image", channel.json_body)
 
     def test_oembed_failure(self) -> None:
@@ -880,6 +975,11 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self.pump()
 
+        # Double check that the proper host is being connected to. (Note that
+        # twitter.com can't be resolved so this is already implicitly checked.)
+        self.assertIn(b"\r\nHost: publish.twitter.com\r\n", server.data)
+
         self.assertEqual(channel.code, 200)
         body = channel.json_body
         self.assertEqual(
@@ -940,6 +1040,22 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             },
         )
 
+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+    )
+    def test_oembed_blocked(self) -> None:
+        """The oEmbed URL should not be downloaded if the oEmbed URL is blocked."""
+        self.lookups["twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+        self.assertEqual(channel.code, 403, channel.result)
+
     def test_oembed_autodiscovery(self) -> None:
         """
         Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
@@ -980,7 +1096,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             % (len(result),)
             + result
         )
-
         self.pump()
 
         # The oEmbed response.
@@ -1004,7 +1119,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             % (len(oembed_content),)
             + oembed_content
         )
-
         self.pump()
 
         # Ensure the URL is what was requested.
@@ -1023,7 +1137,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
             % (len(SMALL_PNG),)
             + SMALL_PNG
         )
-
         self.pump()
 
         # Ensure the URL is what was requested.
@@ -1036,6 +1149,59 @@ class URLPreviewTests(unittest.HomeserverTestCase):
         )
         self._assert_small_png(body)
 
+    @unittest.override_config(
+        {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+    )
+    def test_oembed_autodiscovery_blocked(self) -> None:
+        """
+        If the discovered oEmbed URL is blocked, it should be discarded.
+        """
+        # This is a little cheesy in that we use the www subdomain (which isn't
+        # in the list of oEmbed patterns) to get a "raw" HTML response.
+ self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")] + self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.4")] + + result = b""" + Test + + """ + + channel = self.make_request( + "GET", + "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: text/html; charset="utf8"\r\n\r\n' + ) + % (len(result),) + + result + ) + + self.pump() + + # Ensure there's no additional connections. + self.assertEqual(len(self.reactor.tcpClients), 1) + + # Ensure the URL is what was requested. + self.assertIn(b"\r\nHost: www.twitter.com\r\n", server.data) + + self.assertEqual(channel.code, 200) + body = channel.json_body + self.assertEqual(body["og:title"], "Test") + self.assertNotIn("og:image", body) + def _download_image(self) -> Tuple[str, str]: """Downloads an image into the URL cache. Returns: @@ -1192,7 +1358,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) @unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]}) - def test_blacklist_port(self) -> None: + def test_blocked_port(self) -> None: """Tests that blacklisting URLs with a port makes previewing such URLs fail with a 403 error and doesn't impact other previews. """ @@ -1230,3 +1396,23 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.pump() self.assertEqual(channel.code, 200) + + @unittest.override_config( + {"url_preview_url_blacklist": [{"netloc": "example.com"}]} + ) + def test_blocked_url(self) -> None: + """Tests that blacklisting URLs with a host makes previewing such URLs + fail with a 403 error. + """ + self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")] + + bad_url = quote("http://example.com/foo") + + channel = self.make_request( + "GET", + "preview_url?url=" + bad_url, + shorthand=False, + await_result=False, + ) + self.pump() + self.assertEqual(channel.code, 403, channel.result) -- cgit 1.5.1 From 41b9def9f2c02118796e147f63abf23bc2d7dc04 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 17 May 2023 16:39:06 +0200 Subject: Add a new admin API to create a new device for a user. (#15611) This allows an external service (e.g. the matrix-authentication-service) to create devices for users. --- changelog.d/15611.feature | 1 + docs/admin_api/user_admin_api.md | 27 +++++++++++++++++++++++++++ synapse/rest/admin/devices.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+) create mode 100644 changelog.d/15611.feature diff --git a/changelog.d/15611.feature b/changelog.d/15611.feature new file mode 100644 index 0000000000..7cfb46fd0a --- /dev/null +++ b/changelog.d/15611.feature @@ -0,0 +1 @@ +Add a new admin API to create a new device for a user. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 6b952ba396..229942b311 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -813,6 +813,33 @@ The following fields are returned in the JSON response body: - `total` - Total number of user's devices. +### Create a device + +Creates a new device for a specific `user_id` and `device_id`. Does nothing if the `device_id` +exists already. 
+ +The API is: + +``` +POST /_synapse/admin/v2/users//devices + +{ + "device_id": "QBUAZIFURK" +} +``` + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +The following fields are required in the JSON request body: + +- `device_id` - The device ID to create. + ### Delete multiple devices Deletes the given devices for a specific `user_id`, and invalidates any access token associated with them. diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 3b2f2d9abb..11ebed9bfd 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -137,6 +137,35 @@ class DevicesRestServlet(RestServlet): devices = await self.device_handler.get_devices_by_user(target_user.to_string()) return HTTPStatus.OK, {"devices": devices, "total": len(devices)} + async def on_POST( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + """Creates a new device for the user.""" + await assert_requester_is_admin(self.auth, request) + + target_user = UserID.from_string(user_id) + if not self.is_mine(target_user): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Can only create devices for local users" + ) + + u = await self.store.get_user_by_id(target_user.to_string()) + if u is None: + raise NotFoundError("Unknown user") + + body = parse_json_object_from_request(request) + device_id = body.get("device_id") + if not device_id: + raise SynapseError(HTTPStatus.BAD_REQUEST, "Missing device_id") + if not isinstance(device_id, str): + raise SynapseError(HTTPStatus.BAD_REQUEST, "device_id must be a string") + + await self.device_handler.check_device_registered( + user_id=user_id, device_id=device_id + ) + + return HTTPStatus.CREATED, {} + class DeleteDevicesRestServlet(RestServlet): """ -- cgit 1.5.1 From e15aa00bc08f68c3a1c1b91f3a59e63554d7aa70 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 18 May 2023 10:58:13 +0100 Subject: Fix error message when `app_service_config_files` validation fails (#15614) The second argument of `ConfigError` is a path, passed as an optional `Iterable[str]` and not a `str`. If a string is passed directly, Synapse unhelpfully emits "Error in configuration at a.p.p._.s.e.r.v.i.c.e._.c.o.n.f.i.g._.f.i.l.e.s'" when the config option has the wrong data type. Signed-off-by: Sean Quah --- changelog.d/15614.bugfix | 1 + synapse/config/appservice.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15614.bugfix diff --git a/changelog.d/15614.bugfix b/changelog.d/15614.bugfix new file mode 100644 index 0000000000..b523ae6eb1 --- /dev/null +++ b/changelog.d/15614.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index fd89960e72..c2710fdf04 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -36,11 +36,10 @@ class AppServiceConfig(Config): if not isinstance(self.app_service_config_files, list) or not all( type(x) is str for x in self.app_service_config_files ): - # type-ignore: this function gets arbitrary json value; we do use this path. 
raise ConfigError( "Expected '%s' to be a list of AS config files:" % (self.app_service_config_files), - "app_service_config_files", + ("app_service_config_files",), ) self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) -- cgit 1.5.1 From 68dcd2cbcb3c01787ade9cf3725486712a7cafda Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 18 May 2023 11:11:30 +0100 Subject: Re-type config paths in `ConfigError`s to be `StrSequence`s (#15615) Part of #14809. Signed-off-by: Sean Quah --- changelog.d/15615.misc | 1 + synapse/config/_base.py | 3 ++- synapse/config/_base.pyi | 3 ++- synapse/config/_util.py | 8 ++++---- synapse/config/oembed.py | 6 +++--- synapse/config/server.py | 4 ++-- synapse/types/__init__.py | 8 ++++++++ synapse/util/module_loader.py | 24 +++++++++--------------- 8 files changed, 31 insertions(+), 26 deletions(-) create mode 100644 changelog.d/15615.misc diff --git a/changelog.d/15615.misc b/changelog.d/15615.misc new file mode 100644 index 0000000000..a39fd0a098 --- /dev/null +++ b/changelog.d/15615.misc @@ -0,0 +1 @@ +Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 2ce60610ca..1d268a1817 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -44,6 +44,7 @@ import jinja2 import pkg_resources import yaml +from synapse.types import StrSequence from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter logger = logging.getLogger(__name__) @@ -58,7 +59,7 @@ class ConfigError(Exception): the problem lies. """ - def __init__(self, msg: str, path: Optional[Iterable[str]] = None): + def __init__(self, msg: str, path: Optional[StrSequence] = None): self.msg = msg self.path = path diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index b5cec132b4..fc51aed234 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -61,9 +61,10 @@ from synapse.config import ( # noqa: F401 voip, workers, ) +from synapse.types import StrSequence class ConfigError(Exception): - def __init__(self, msg: str, path: Optional[Iterable[str]] = None): + def __init__(self, msg: str, path: Optional[StrSequence] = None): self.msg = msg self.path = path diff --git a/synapse/config/_util.py b/synapse/config/_util.py index dfc5d12210..acccca413b 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -11,17 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict, Iterable, Type, TypeVar +from typing import Any, Dict, Type, TypeVar import jsonschema from pydantic import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence def validate_config( - json_schema: JsonDict, config: Any, config_path: Iterable[str] + json_schema: JsonDict, config: Any, config_path: StrSequence ) -> None: """Validates a config setting against a JsonSchema definition @@ -45,7 +45,7 @@ def validate_config( def json_error_to_config_error( - e: jsonschema.ValidationError, config_path: Iterable[str] + e: jsonschema.ValidationError, config_path: StrSequence ) -> ConfigError: """Converts a json validation error to a user-readable ConfigError diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index 0d32aba70a..d7959639ee 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -19,7 +19,7 @@ from urllib import parse as urlparse import attr import pkg_resources -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence from ._base import Config, ConfigError from ._util import validate_config @@ -80,7 +80,7 @@ class OembedConfig(Config): ) def _parse_and_validate_provider( - self, providers: List[JsonDict], config_path: Iterable[str] + self, providers: List[JsonDict], config_path: StrSequence ) -> Iterable[OEmbedEndpointConfig]: # Ensure it is the proper form. validate_config( @@ -112,7 +112,7 @@ class OembedConfig(Config): api_endpoint, patterns, endpoint.get("formats") ) - def _glob_to_pattern(self, glob: str, config_path: Iterable[str]) -> Pattern: + def _glob_to_pattern(self, glob: str, config_path: StrSequence) -> Pattern: """ Convert the glob into a sane regular expression to match against. The rules followed will be slightly different for the domain portion vs. diff --git a/synapse/config/server.py b/synapse/config/server.py index 386c3194b8..64201238d6 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -27,7 +27,7 @@ from netaddr import AddrFormatError, IPNetwork, IPSet from twisted.conch.ssh.keys import Key from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.types import JsonDict +from synapse.types import JsonDict, StrSequence from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_server_name @@ -73,7 +73,7 @@ def _6to4(network: IPNetwork) -> IPNetwork: def generate_ip_set( ip_addresses: Optional[Iterable[str]], extra_addresses: Optional[Iterable[str]] = None, - config_path: Optional[Iterable[str]] = None, + config_path: Optional[StrSequence] = None, ) -> IPSet: """ Generate an IPSet from a list of IP addresses or CIDRs. diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 325219656a..42baf8ac6b 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -84,7 +84,15 @@ JsonSerializable = object # Collection[str] that does not include str itself; str being a Sequence[str] # is very misleading and results in bugs. +# +# StrCollection is an unordered collection of strings. If ordering is important, +# StrSequence can be used instead. StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]] +# Sequence[str] that does not include str itself; str being a Sequence[str] +# is very misleading and results in bugs. +# +# Unlike StrCollection, StrSequence is an ordered collection of strings. 
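+#
+# (A concrete example of the bug being guarded against: since a plain `str`
+# is itself a `Sequence[str]`, joining the "elements" of a mistakenly passed
+# option name walks it character by character, producing output such as
+# "a.p.p._.s.e.r.v.i.c.e..." as described in the previous commit.)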
+StrSequence = Union[Tuple[str, ...], List[str]] # Note that this seems to require inheriting *directly* from Interface in order diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 5a638c6e9a..e3a54df48b 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -14,17 +14,17 @@ import importlib import importlib.util -import itertools from types import ModuleType -from typing import Any, Iterable, Tuple, Type +from typing import Any, Tuple, Type import jsonschema from synapse.config._base import ConfigError from synapse.config._util import json_error_to_config_error +from synapse.types import StrSequence -def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: +def load_module(provider: dict, config_path: StrSequence) -> Tuple[Type, Any]: """Loads a synapse module with its config Args: @@ -39,9 +39,7 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: modulename = provider.get("module") if not isinstance(modulename, str): - raise ConfigError( - "expected a string", path=itertools.chain(config_path, ("module",)) - ) + raise ConfigError("expected a string", path=tuple(config_path) + ("module",)) # We need to import the module, and then pick the class out of # that, so we split based on the last dot. @@ -55,19 +53,17 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: try: provider_config = provider_class.parse_config(module_config) except jsonschema.ValidationError as e: - raise json_error_to_config_error( - e, itertools.chain(config_path, ("config",)) - ) + raise json_error_to_config_error(e, tuple(config_path) + ("config",)) except ConfigError as e: raise _wrap_config_error( "Failed to parse config for module %r" % (modulename,), - prefix=itertools.chain(config_path, ("config",)), + prefix=tuple(config_path) + ("config",), e=e, ) except Exception as e: raise ConfigError( "Failed to parse config for module %r" % (modulename,), - path=itertools.chain(config_path, ("config",)), + path=tuple(config_path) + ("config",), ) from e else: provider_config = module_config @@ -92,9 +88,7 @@ def load_python_module(location: str) -> ModuleType: return mod -def _wrap_config_error( - msg: str, prefix: Iterable[str], e: ConfigError -) -> "ConfigError": +def _wrap_config_error(msg: str, prefix: StrSequence, e: ConfigError) -> "ConfigError": """Wrap a relative ConfigError with a new path This is useful when we have a ConfigError with a relative path due to a problem @@ -102,7 +96,7 @@ def _wrap_config_error( """ path = prefix if e.path: - path = itertools.chain(prefix, e.path) + path = tuple(prefix) + tuple(e.path) e1 = ConfigError(msg, path) -- cgit 1.5.1 From 4ec40b16ac2436387d5d947308e5dc37857acbf6 Mon Sep 17 00:00:00 2001 From: axel simon Date: Thu, 18 May 2023 15:44:28 +0100 Subject: flake.nix: start synapse automatically, add space usage warning (#15613) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/15613.doc | 1 + flake.nix | 53 ++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 39 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15613.doc diff --git a/changelog.d/15613.doc b/changelog.d/15613.doc new file mode 100644 index 0000000000..94733facf0 --- /dev/null +++ b/changelog.d/15613.doc @@ -0,0 +1 @@ +Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. 
diff --git a/flake.nix b/flake.nix
index 7351571e61..8d2bf779bd 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,35 +1,30 @@
-# A nix flake that sets up a complete Synapse development environment. Dependencies
+# A Nix flake that sets up a complete Synapse development environment. Dependencies
 # for the SyTest (https://github.com/matrix-org/sytest) and Complement
 # (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
 # installed automatically.
 #
-# You must have already installed nix (https://nixos.org) on your system to use this.
-# nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
-# directly supported, but nix can be installed inside of WSL2 or even Docker
+# You must have already installed Nix (https://nixos.org) on your system to use this.
+# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
+# directly supported, but Nix can be installed inside of WSL2 or even Docker
 # containers. Please refer to https://nixos.org/download for details.
 #
 # You must also enable support for flakes in Nix. See the following for how to
 # do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
 #
+# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
+#
 # Usage:
 #
-# With nix installed, navigate to the directory containing this flake and run
+# With Nix installed, navigate to the directory containing this flake and run
 # `nix develop --impure`. The `--impure` is necessary in order to store state
 # locally from "services", such as PostgreSQL and Redis.
 #
 # You should now be dropped into a new shell with all programs and dependencies
 # available to you!
 #
-# You can start up pre-configured, local PostgreSQL and Redis instances by
+# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
 # running: `devenv up`. To stop them, use Ctrl-C.
 #
-# A PostgreSQL database called 'synapse' will be set up for you, along with
-# a PostgreSQL user named 'synapse_user'.
-# The 'host' can be found by running `echo $PGHOST` with the development
-# shell activated. Use these values to configure your Synapse to connect
-# to the local PostgreSQL database. You do not need to specify a password.
-# https://matrix-org.github.io/synapse/latest/postgres
-#
 # All state (the venv, postgres and redis data and config) is stored in
 # .devenv/state. Deleting a file from here and then re-entering the shell
 # will recreate these files from scratch.
@@ -66,7 +61,7 @@
       let
         pkgs = nixpkgs.legacyPackages.${system};
       in {
-        # Everything is configured via devenv - a nix module for creating declarative
+        # Everything is configured via devenv - a Nix module for creating declarative
         # developer environments. See https://devenv.sh/reference/options/ for a list
         # of all possible options.
         default = devenv.lib.mkShell {
@@ -153,11 +148,39 @@
           # Redis is needed in order to run Synapse in worker mode.
           services.redis.enable = true;
 
+          # Configure and start Synapse. Before starting Synapse, this shell code:
+          # * generates a default homeserver.yaml config file if one does not exist, and
+          # * ensures a directory containing two additional homeserver config files exists;
+          #   one that configures the development environment's PostgreSQL as the
+          #   database backend and another for enabling Redis support.
+ process.before = '' + python -m synapse.app.homeserver -c homeserver.yaml --generate-config --server-name=synapse.dev --report-stats=no + mkdir -p homeserver-config-overrides.d + cat > homeserver-config-overrides.d/database.yaml << EOF + ## Do not edit this file. This file is generated by flake.nix + database: + name: psycopg2 + args: + user: synapse_user + database: synapse + host: $PGHOST + cp_min: 5 + cp_max: 10 + EOF + cat > homeserver-config-overrides.d/redis.yaml << EOF + ## Do not edit this file. This file is generated by flake.nix + redis: + enabled: true + EOF + ''; + # Start synapse when `devenv up` is run. + processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d"; + # Define the perl modules we require to run SyTest. # # This list was compiled by cross-referencing https://metacpan.org/ # with the modules defined in './cpanfile' and then finding the - # corresponding nix packages on https://search.nixos.org/packages. + # corresponding Nix packages on https://search.nixos.org/packages. # # This was done until `./install-deps.pl --dryrun` produced no output. env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [ -- cgit 1.5.1 From 5dc1f25c53b5909e3aa39a6faac2d28e64e4a9a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 18 May 2023 10:53:57 -0400 Subject: Fix olddeps build (#15626) Do an `apt update` before install packages. --- .github/workflows/tests.yml | 3 ++- changelog.d/15626.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15626.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 51cbeb3298..f84a4ef644 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -314,8 +314,9 @@ jobs: # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | + sudo apt update sudo apt-get -qq install build-essential libffi-dev python-dev \ - libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev + libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - uses: actions/setup-python@v4 with: diff --git a/changelog.d/15626.misc b/changelog.d/15626.misc new file mode 100644 index 0000000000..0016cdbf10 --- /dev/null +++ b/changelog.d/15626.misc @@ -0,0 +1 @@ +Fix the olddeps CI. -- cgit 1.5.1 From e5b4d93770fe5cfc45f1e769d8cb00a2075d68fa Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 18 May 2023 18:49:12 +0200 Subject: Update Mutual Rooms (MSC2666) implementation (#15621) To track changes in MSC2666: - The change from `/mutual_rooms/{user_id}` to `/mutual_rooms?user_id={user_id}`. - The addition of `next_batch_token` (and logic). - Unstable flag now being `uk.half-shot.msc2666.query_mutual_rooms`. - The error code when your own user is requested. --- changelog.d/15621.misc | 1 + synapse/rest/client/mutual_rooms.py | 43 ++++++++++++++++++++++++---------- synapse/rest/client/versions.py | 2 +- tests/rest/client/test_mutual_rooms.py | 6 +++-- 4 files changed, 37 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15621.misc diff --git a/changelog.d/15621.misc b/changelog.d/15621.misc new file mode 100644 index 0000000000..5d060f4dbc --- /dev/null +++ b/changelog.d/15621.misc @@ -0,0 +1 @@ +Update Mutual Rooms (MSC2666) implementation to match new proposal text. 
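A rough sketch of how a client might drive the updated endpoint follows. The homeserver URL and access token are placeholders, the `joined` response field comes from MSC2666 rather than from the diff below, and the pagination loop follows the commit message (Synapse itself does no batching and rejects any `batch_token`):

```
import requests  # any HTTP client would do; assumed here for brevity

BASE_URL = "https://homeserver.example"  # placeholder homeserver
ACCESS_TOKEN = "syt_placeholder"         # placeholder access token


def get_mutual_rooms(user_id: str) -> list:
    """Fetch all rooms shared with `user_id`, following batching if offered."""
    rooms: list = []
    params = {"user_id": user_id}
    while True:
        resp = requests.get(
            BASE_URL
            + "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms",
            params=params,
            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        )
        resp.raise_for_status()
        body = resp.json()
        rooms.extend(body.get("joined", []))
        token = body.get("next_batch_token")
        if token is None:
            return rooms
        # Echo the token back as `batch_token` on the next request.
        params = {"user_id": user_id, "batch_token": token}
```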
\ No newline at end of file diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index 38ef4e459f..c99445da30 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Tuple +from http import HTTPStatus +from typing import TYPE_CHECKING, Dict, List, Tuple from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer -from synapse.http.servlet import RestServlet +from synapse.http.servlet import RestServlet, parse_strings_from_args from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict from ._base import client_patterns @@ -30,11 +31,11 @@ logger = logging.getLogger(__name__) class UserMutualRoomsServlet(RestServlet): """ - GET /uk.half-shot.msc2666/user/mutual_rooms/{user_id} HTTP/1.1 + GET /uk.half-shot.msc2666/user/mutual_rooms?user_id={user_id} HTTP/1.1 """ PATTERNS = client_patterns( - "/uk.half-shot.msc2666/user/mutual_rooms/(?P[^/]*)", + "/uk.half-shot.msc2666/user/mutual_rooms$", releases=(), # This is an unstable feature ) @@ -43,17 +44,35 @@ class UserMutualRoomsServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastores().main - async def on_GET( - self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: - UserID.from_string(user_id) + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + args: Dict[bytes, List[bytes]] = request.args # type: ignore + + user_ids = parse_strings_from_args(args, "user_id", required=True) + + if len(user_ids) > 1: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Duplicate user_id query parameter", + errcode=Codes.INVALID_PARAM, + ) + + # We don't do batching, so a batch token is illegal by default + if b"batch_token" in args: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Unknown batch_token", + errcode=Codes.INVALID_PARAM, + ) + + user_id = user_ids[0] requester = await self.auth.get_user_by_req(request) if user_id == requester.user.to_string(): raise SynapseError( - code=400, - msg="You cannot request a list of shared rooms with yourself", - errcode=Codes.FORBIDDEN, + HTTPStatus.UNPROCESSABLE_ENTITY, + "You cannot request a list of shared rooms with yourself", + errcode=Codes.INVALID_PARAM, ) rooms = await self.store.get_mutual_rooms_between_users( diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 58c5b07390..32df054f56 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -91,7 +91,7 @@ class VersionsRestServlet(RestServlet): # Implements additional endpoints as described in MSC2432 "org.matrix.msc2432": True, # Implements additional endpoints as described in MSC2666 - "uk.half-shot.msc2666.mutual_rooms": True, + "uk.half-shot.msc2666.query_mutual_rooms": True, # Whether new rooms will be set to encrypted or not (based on presets). 
"io.element.e2ee_forced.public": self.e2ee_forced_public, "io.element.e2ee_forced.private": self.e2ee_forced_private, diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py index a4327f7ace..22fddbd6d6 100644 --- a/tests/rest/client/test_mutual_rooms.py +++ b/tests/rest/client/test_mutual_rooms.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from urllib.parse import quote + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -44,8 +46,8 @@ class UserMutualRoomsTest(unittest.HomeserverTestCase): def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel: return self.make_request( "GET", - "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms/%s" - % other_user, + "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms" + f"?user_id={quote(other_user)}", access_token=token, ) -- cgit 1.5.1 From ad50510a06d035a674f0eeed5db5dd3060bc0b1c Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Thu, 18 May 2023 19:37:31 +0100 Subject: Handle missing previous read marker event. (#15464) If the previous read marker is pointing to an event that no longer exists (e.g. due to retention) then assume that the newly given read marker is newer. --- changelog.d/15464.bugfix | 1 + synapse/handlers/read_marker.py | 18 ++- synapse/storage/databases/main/events_worker.py | 6 - tests/rest/client/test_read_marker.py | 147 ++++++++++++++++++++++++ 4 files changed, 162 insertions(+), 10 deletions(-) create mode 100644 changelog.d/15464.bugfix create mode 100644 tests/rest/client/test_read_marker.py diff --git a/changelog.d/15464.bugfix b/changelog.d/15464.bugfix new file mode 100644 index 0000000000..3c655989b3 --- /dev/null +++ b/changelog.d/15464.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 6d35e61880..49a497a860 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -16,6 +16,7 @@ import logging from typing import TYPE_CHECKING from synapse.api.constants import ReceiptTypes +from synapse.api.errors import SynapseError from synapse.util.async_helpers import Linearizer if TYPE_CHECKING: @@ -47,12 +48,21 @@ class ReadMarkerHandler: ) should_update = True + # Get event ordering, this also ensures we know about the event + event_ordering = await self.store.get_event_ordering(event_id) if existing_read_marker: - # Only update if the new marker is ahead in the stream - should_update = await self.store.is_event_after( - event_id, existing_read_marker["event_id"] - ) + try: + old_event_ordering = await self.store.get_event_ordering( + existing_read_marker["event_id"] + ) + except SynapseError: + # Old event no longer exists, assume new is ahead. This may + # happen if the old event was removed due to retention. 
+ pass + else: + # Only update if the new marker is ahead in the stream + should_update = event_ordering > old_event_ordering if should_update: content = {"event_id": event_id} diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 53aa5933d5..a39bc90974 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1973,12 +1973,6 @@ class EventsWorkerStore(SQLBaseStore): return rows, to_token, True - async def is_event_after(self, event_id1: str, event_id2: str) -> bool: - """Returns True if event_id1 is after event_id2 in the stream""" - to_1, so_1 = await self.get_event_ordering(event_id1) - to_2, so_2 = await self.get_event_ordering(event_id2) - return (to_1, so_1) > (to_2, so_2) - @cached(max_entries=5000) async def get_event_ordering(self, event_id: str) -> Tuple[int, int]: res = await self.db_pool.simple_select_one( diff --git a/tests/rest/client/test_read_marker.py b/tests/rest/client/test_read_marker.py new file mode 100644 index 0000000000..0eedcdb476 --- /dev/null +++ b/tests/rest/client/test_read_marker.py @@ -0,0 +1,147 @@ +# Copyright 2023 Beeper +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes +from synapse.rest import admin +from synapse.rest.client import login, read_marker, register, room +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest + +ONE_HOUR_MS = 3600000 +ONE_DAY_MS = ONE_HOUR_MS * 24 + + +class ReadMarkerTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + register.register_servlets, + read_marker.register_servlets, + room.register_servlets, + synapse.rest.admin.register_servlets, + admin.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + config = self.default_config() + + # merge this default retention config with anything that was specified in + # @override_config + retention_config = { + "enabled": True, + "allowed_lifetime_min": ONE_DAY_MS, + "allowed_lifetime_max": ONE_DAY_MS * 3, + } + retention_config.update(config.get("retention", {})) + config["retention"] = retention_config + + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + self.store = self.hs.get_datastores().main + self.clock = self.hs.get_clock() + + def test_send_read_marker(self) -> None: + room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + def send_message() -> str: + res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok) + return res["event_id"] + + # Test setting the read marker on the room + event_id_1 = send_message() + + channel = self.make_request( + "POST", + 
"/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_1, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Test moving the read marker to a newer event + event_id_2 = send_message() + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_2, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + def test_send_read_marker_missing_previous_event(self) -> None: + """ + Test moving a read marker from an event that previously existed but was + later removed due to retention rules. + """ + + room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + # Set retention rule on the room so we remove old events to test this case + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={"max_lifetime": ONE_DAY_MS}, + tok=self.owner_tok, + ) + + def send_message() -> str: + res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok) + return res["event_id"] + + # Test setting the read marker on the room + event_id_1 = send_message() + + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_1, + }, + access_token=self.owner_tok, + ) + + # Send a second message (retention will not remove the latest event ever) + send_message() + # And then advance so retention rules remove the first event (where the marker is) + self.reactor.advance(ONE_DAY_MS * 2 / 1000) + + event = self.get_success(self.store.get_event(event_id_1, allow_none=True)) + assert event is None + + # TODO See https://github.com/matrix-org/synapse/issues/13476 + self.store.get_event_ordering.invalidate_all() + + # Test moving the read marker to a newer event + event_id_2 = send_message() + channel = self.make_request( + "POST", + "/rooms/!abc:beep/read_markers", + content={ + "m.fully_read": event_id_2, + }, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) -- cgit 1.5.1 From d0de452d1222ada8d219a8c5bc42498a89e5ecea Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 19 May 2023 11:17:12 +0100 Subject: Fix `HomeServer`s leaking during `trial` test runs (#15630) This change fixes two memory leaks during `trial` test runs. Garbage collection is disabled during each test case and a gen-0 GC is run at the end of each test. However, when the gen-0 GC is run, the `TestCase` object usually still holds references to the `HomeServer` used during the test. As a result, the `HomeServer` gets promoted to gen-1 and then never garbage collected. Fix this by periodically running full GCs. Additionally, fix `HomeServer`s leaking after tests that touch inbound federation due to `FederationRateLimiter`s adding themselves to a global set, by turning the set into a `WeakSet`. Resolves #15622. Signed-off-by: Sean Quah --- changelog.d/15630.misc | 1 + synapse/util/ratelimitutils.py | 6 +++++- tests/unittest.py | 11 +++++++++-- 3 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15630.misc diff --git a/changelog.d/15630.misc b/changelog.d/15630.misc new file mode 100644 index 0000000000..a30304bfd6 --- /dev/null +++ b/changelog.d/15630.misc @@ -0,0 +1 @@ +Fix two memory leaks in `trial` test runs. 
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index f262bf95a0..2ad55ac13e 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -25,10 +25,12 @@ from typing import ( Iterator, List, Mapping, + MutableSet, Optional, Set, Tuple, ) +from weakref import WeakSet from prometheus_client.core import Counter from typing_extensions import ContextManager @@ -86,7 +88,9 @@ queue_wait_timer = Histogram( ) -_rate_limiter_instances: Set["FederationRateLimiter"] = set() +# This must be a `WeakSet`, otherwise we indirectly hold on to entire `HomeServer`s +# during trial test runs and leak a lot of memory. +_rate_limiter_instances: MutableSet["FederationRateLimiter"] = WeakSet() # Protects the _rate_limiter_instances set from concurrent access _rate_limiter_instances_lock = threading.Lock() diff --git a/tests/unittest.py b/tests/unittest.py index b6fdf69635..623c5a75a2 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -229,13 +229,20 @@ class TestCase(unittest.TestCase): # # The easiest way to do this would be to do a full GC after each test # run, but that is very expensive. Instead, we disable GC (above) for - # the duration of the test so that we only need to run a gen-0 GC, which - # is a lot quicker. + # the duration of the test and only run a gen-0 GC, which is a lot + # quicker. This doesn't clean up everything, since the TestCase + # instance still holds references to objects created during the test, + # such as HomeServers, so we do a full GC every so often. @around(self) def tearDown(orig: Callable[[], R]) -> R: ret = orig() gc.collect(0) + # Run a full GC every 50 gen-0 GCs. + gen0_stats = gc.get_stats()[0] + gen0_collections = gen0_stats["collections"] + if gen0_collections % 50 == 0: + gc.collect() gc.enable() set_current_context(SENTINEL_CONTEXT) -- cgit 1.5.1 From 07771fa487e1d281fdcae35a47db87ab675cb6b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 19 May 2023 07:23:09 -0400 Subject: Remove experimental configuration flags & unstable values for faster joins (#15625) Synapse will no longer send (or respond to) the unstable flags for faster joins. These were only available behind a configuration flag and handled in parallel with the stable flags. --- changelog.d/15625.misc | 1 + synapse/config/experimental.py | 12 -------- synapse/federation/federation_server.py | 2 -- synapse/federation/transport/client.py | 29 ++----------------- synapse/federation/transport/server/federation.py | 12 +------- tests/federation/transport/test_client.py | 35 ++--------------------- 6 files changed, 8 insertions(+), 83 deletions(-) create mode 100644 changelog.d/15625.misc diff --git a/changelog.d/15625.misc b/changelog.d/15625.misc new file mode 100644 index 0000000000..7ea8cc9433 --- /dev/null +++ b/changelog.d/15625.misc @@ -0,0 +1 @@ +Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 6e453bd963..d769b7f668 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,18 +84,6 @@ class ExperimentalConfig(Config): "msc3984_appservice_key_query", False ) - # MSC3706 (server-side support for partial state in /send_join responses) - # Synapse will always serve partial state responses to requests using the stable - # query parameter `omit_members`. 
If this flag is set, Synapse will also serve - # partial state responses to requests using the unstable query parameter - # `org.matrix.msc3706.partial_state`. - self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) - - # experimental support for faster joins over federation - # (MSC2775, MSC3706, MSC3895) - # requires a target server that can provide a partial join response (MSC3706) - self.faster_joins_enabled: bool = experimental.get("faster_joins", True) - # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index c590d8f96f..f4ca70a698 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -739,12 +739,10 @@ class FederationServer(FederationBase): "event": event_json, "state": [p.get_pdu_json(time_now) for p in state_events], "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events], - "org.matrix.msc3706.partial_state": caller_supports_partial_state, "members_omitted": caller_supports_partial_state, } if servers_in_room is not None: - resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room) resp["servers_in_room"] = list(servers_in_room) return resp diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index d2fa9976da..1cfc4446c4 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -59,7 +59,6 @@ class TransportLayerClient: def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() - self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled self._is_mine_server_name = hs.is_mine_server_name async def get_room_state_ids( @@ -363,12 +362,8 @@ class TransportLayerClient: ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) query_params: Dict[str, str] = {} - if self._faster_joins_enabled: - # lazy-load state on join - query_params["org.matrix.msc3706.partial_state"] = ( - "true" if omit_members else "false" - ) - query_params["omit_members"] = "true" if omit_members else "false" + # lazy-load state on join + query_params["omit_members"] = "true" if omit_members else "false" return await self.client.put_json( destination=destination, @@ -902,9 +897,7 @@ def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, while True: val = yield if not isinstance(val, bool): - raise TypeError( - "members_omitted (formerly org.matrix.msc370c.partial_state) must be a boolean" - ) + raise TypeError("members_omitted must be a boolean") response.members_omitted = val @@ -964,14 +957,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]): ] if not v1_api: - self._coros.append( - ijson.items_coro( - _members_omitted_parser(self._response), - "org.matrix.msc3706.partial_state", - use_float="True", - ) - ) - # The stable field name comes last, so it "wins" if the fields disagree self._coros.append( ijson.items_coro( _members_omitted_parser(self._response), @@ -980,14 +965,6 @@ class SendJoinParser(ByteParser[SendJoinResponse]): ) ) - self._coros.append( - ijson.items_coro( - _servers_in_room_parser(self._response), - "org.matrix.msc3706.servers_in_room", - use_float="True", - ) - ) - # Again, stable field name comes last self._coros.append( ijson.items_coro( diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 
36b0362504..3a744e25be 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -440,7 +440,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): server_name: str, ): super().__init__(hs, authenticator, ratelimiter, server_name) - self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled async def on_PUT( self, @@ -453,16 +452,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): # TODO(paul): assert that event_id parsed from path actually # match those given in content - partial_state = False - # The stable query parameter wins, if it disagrees with the unstable - # parameter for some reason. - stable_param = parse_boolean_from_args(query, "omit_members", default=None) - if stable_param is not None: - partial_state = stable_param - elif self._read_msc3706_query_param: - partial_state = parse_boolean_from_args( - query, "org.matrix.msc3706.partial_state", default=False - ) + partial_state = parse_boolean_from_args(query, "omit_members", default=False) result = await self.handler.on_send_join_request( origin, content, room_id, caller_supports_partial_state=partial_state diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index 3d61b1e8a9..93e5c85a27 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -86,18 +86,7 @@ class SendJoinParserTestCase(TestCase): return parsed_response.members_omitted self.assertTrue(parse({"members_omitted": True})) - self.assertTrue(parse({"org.matrix.msc3706.partial_state": True})) - self.assertFalse(parse({"members_omitted": False})) - self.assertFalse(parse({"org.matrix.msc3706.partial_state": False})) - - # If there's a conflict, the stable field wins. - self.assertTrue( - parse({"members_omitted": True, "org.matrix.msc3706.partial_state": False}) - ) - self.assertFalse( - parse({"members_omitted": False, "org.matrix.msc3706.partial_state": True}) - ) def test_servers_in_room(self) -> None: """Check that the servers_in_room field is correctly parsed""" @@ -113,28 +102,10 @@ class SendJoinParserTestCase(TestCase): parsed_response = parser.finish() return parsed_response.servers_in_room - self.assertEqual( - parse({"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}), - ["hs1", "hs2"], - ) self.assertEqual(parse({"servers_in_room": ["example.com"]}), ["example.com"]) - # If both are provided, the stable identifier should win - self.assertEqual( - parse( - { - "org.matrix.msc3706.servers_in_room": ["old"], - "servers_in_room": ["new"], - } - ), - ["new"], - ) - - # And lastly, we should be able to tell if neither field was present. - self.assertEqual( - parse({}), - None, - ) + # We should be able to tell the field is not present. + self.assertEqual(parse({}), None) def test_errors_closing_coroutines(self) -> None: """Check we close all coroutines, even if closing the first raises an Exception. @@ -143,7 +114,7 @@ class SendJoinParserTestCase(TestCase): assertions about its attributes or type. """ parser = SendJoinParser(RoomVersions.V1, False) - response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]} + response = {"servers_in_room": ["hs1", "hs2"]} serialisation = json.dumps(response).encode() # Mock the coroutines managed by this parser. -- cgit 1.5.1 From 89a23c940672944acd98db58085cdc38191515a8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 19 May 2023 08:06:54 -0400 Subject: Do not allow deactivated users to login with JWT. 
(#15624) To improve the organization of this code it moves the JWT login checks to a separate handler and then fixes the bug (and a deprecation warning). --- changelog.d/15624.bugfix | 1 + synapse/handlers/jwt.py | 118 ++++++++++++++++++++++++++++++++++++++++ synapse/rest/client/login.py | 77 ++++---------------------- synapse/server.py | 7 +++ tests/rest/client/test_login.py | 20 ++++++- 5 files changed, 156 insertions(+), 67 deletions(-) create mode 100644 changelog.d/15624.bugfix create mode 100644 synapse/handlers/jwt.py diff --git a/changelog.d/15624.bugfix b/changelog.d/15624.bugfix new file mode 100644 index 0000000000..fde515ba62 --- /dev/null +++ b/changelog.d/15624.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py new file mode 100644 index 0000000000..5fddc0e315 --- /dev/null +++ b/synapse/handlers/jwt.py @@ -0,0 +1,118 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from authlib.jose import JsonWebToken, JWTClaims +from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError + +from synapse.api.errors import Codes, LoginError, StoreError, UserDeactivatedError +from synapse.types import JsonDict, UserID + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class JwtHandler: + def __init__(self, hs: "HomeServer"): + self.hs = hs + self._main_store = hs.get_datastores().main + + self.jwt_secret = hs.config.jwt.jwt_secret + self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim + self.jwt_algorithm = hs.config.jwt.jwt_algorithm + self.jwt_issuer = hs.config.jwt.jwt_issuer + self.jwt_audiences = hs.config.jwt.jwt_audiences + + async def validate_login(self, login_submission: JsonDict) -> str: + """ + Authenticates the user for the /login API + + Args: + login_submission: the whole of the login submission + (including 'type' and other relevant fields) + + Returns: + The user ID that is logging in. + + Raises: + LoginError if there was an authentication problem. 
+ """ + token = login_submission.get("token", None) + if token is None: + raise LoginError( + 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN + ) + + jwt = JsonWebToken([self.jwt_algorithm]) + claim_options = {} + if self.jwt_issuer is not None: + claim_options["iss"] = {"value": self.jwt_issuer, "essential": True} + if self.jwt_audiences is not None: + claim_options["aud"] = {"values": self.jwt_audiences, "essential": True} + + try: + claims = jwt.decode( + token, + key=self.jwt_secret, + claims_cls=JWTClaims, + claims_options=claim_options, + ) + except BadSignatureError: + # We handle this case separately to provide a better error message + raise LoginError( + 403, + "JWT validation failed: Signature verification failed", + errcode=Codes.FORBIDDEN, + ) + except JoseError as e: + # A JWT error occurred, return some info back to the client. + raise LoginError( + 403, + "JWT validation failed: %s" % (str(e),), + errcode=Codes.FORBIDDEN, + ) + + try: + claims.validate(leeway=120) # allows 2 min of clock skew + + # Enforce the old behavior which is rolled out in productive + # servers: if the JWT contains an 'aud' claim but none is + # configured, the login attempt will fail + if claims.get("aud") is not None: + if self.jwt_audiences is None or len(self.jwt_audiences) == 0: + raise InvalidClaimError("aud") + except JoseError as e: + raise LoginError( + 403, + "JWT validation failed: %s" % (str(e),), + errcode=Codes.FORBIDDEN, + ) + + user = claims.get(self.jwt_subject_claim, None) + if user is None: + raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) + + user_id = UserID(user, self.hs.hostname).to_string() + + # If the account has been deactivated, do not proceed with the login + # flow. + try: + deactivated = await self._main_store.get_user_deactivated_status(user_id) + except StoreError: + # JWT lazily creates users, so they may not exist in the database yet. + deactivated = False + if deactivated: + raise UserDeactivatedError("This account has been deactivated") + + return user_id diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index a348720131..afdbf821b5 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -87,11 +87,6 @@ class LoginRestServlet(RestServlet): # JWT configuration variables. self.jwt_enabled = hs.config.jwt.jwt_enabled - self.jwt_secret = hs.config.jwt.jwt_secret - self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim - self.jwt_algorithm = hs.config.jwt.jwt_algorithm - self.jwt_issuer = hs.config.jwt.jwt_issuer - self.jwt_audiences = hs.config.jwt.jwt_audiences # SSO configuration. self.saml2_enabled = hs.config.saml2.saml2_enabled @@ -427,7 +422,7 @@ class LoginRestServlet(RestServlet): self, login_submission: JsonDict, should_issue_refresh_token: bool = False ) -> LoginResponse: """ - Handle the final stage of SSO login. + Handle token login. Args: login_submission: The JSON request body. 
@@ -452,72 +447,24 @@ class LoginRestServlet(RestServlet): async def _do_jwt_login( self, login_submission: JsonDict, should_issue_refresh_token: bool = False ) -> LoginResponse: - token = login_submission.get("token", None) - if token is None: - raise LoginError( - 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN - ) - - from authlib.jose import JsonWebToken, JWTClaims - from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError - - jwt = JsonWebToken([self.jwt_algorithm]) - claim_options = {} - if self.jwt_issuer is not None: - claim_options["iss"] = {"value": self.jwt_issuer, "essential": True} - if self.jwt_audiences is not None: - claim_options["aud"] = {"values": self.jwt_audiences, "essential": True} - - try: - claims = jwt.decode( - token, - key=self.jwt_secret, - claims_cls=JWTClaims, - claims_options=claim_options, - ) - except BadSignatureError: - # We handle this case separately to provide a better error message - raise LoginError( - 403, - "JWT validation failed: Signature verification failed", - errcode=Codes.FORBIDDEN, - ) - except JoseError as e: - # A JWT error occurred, return some info back to the client. - raise LoginError( - 403, - "JWT validation failed: %s" % (str(e),), - errcode=Codes.FORBIDDEN, - ) - - try: - claims.validate(leeway=120) # allows 2 min of clock skew - - # Enforce the old behavior which is rolled out in productive - # servers: if the JWT contains an 'aud' claim but none is - # configured, the login attempt will fail - if claims.get("aud") is not None: - if self.jwt_audiences is None or len(self.jwt_audiences) == 0: - raise InvalidClaimError("aud") - except JoseError as e: - raise LoginError( - 403, - "JWT validation failed: %s" % (str(e),), - errcode=Codes.FORBIDDEN, - ) + """ + Handle the custom JWT login. - user = claims.get(self.jwt_subject_claim, None) - if user is None: - raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) + Args: + login_submission: The JSON request body. + should_issue_refresh_token: True if this login should issue + a refresh token alongside the access token. - user_id = UserID(user, self.hs.hostname).to_string() - result = await self._complete_login( + Returns: + The body of the JSON response. 
+ """ + user_id = await self.hs.get_jwt_handler().validate_login(login_submission) + return await self._complete_login( user_id, login_submission, create_non_existent_users=True, should_issue_refresh_token=should_issue_refresh_token, ) - return result def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict: diff --git a/synapse/server.py b/synapse/server.py index b307295789..aa90465047 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -147,6 +147,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: from txredisapi import ConnectionHandler + from synapse.handlers.jwt import JwtHandler from synapse.handlers.oidc import OidcHandler from synapse.handlers.saml import SamlHandler @@ -533,6 +534,12 @@ class HomeServer(metaclass=abc.ABCMeta): def get_sso_handler(self) -> SsoHandler: return SsoHandler(self) + @cache_in_self + def get_jwt_handler(self) -> "JwtHandler": + from synapse.handlers.jwt import JwtHandler + + return JwtHandler(self) + @cache_in_self def get_sync_handler(self) -> SyncHandler: return SyncHandler(self) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 62acf4f44e..dc32982e22 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -42,7 +42,7 @@ from tests.test_utils.html_parsers import TestHtmlParser from tests.unittest import HomeserverTestCase, override_config, skip_unless try: - from authlib.jose import jwk, jwt + from authlib.jose import JsonWebKey, jwt HAS_JWT = True except ImportError: @@ -1054,6 +1054,22 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual(channel.json_body["error"], "Token field for JWT is missing") + def test_deactivated_user(self) -> None: + """Logging in as a deactivated account should error.""" + user_id = self.register_user("kermit", "monkey") + self.get_success( + self.hs.get_deactivate_account_handler().deactivate_account( + user_id, erase_data=False, requester=create_requester(user_id) + ) + ) + + channel = self.jwt_login({"sub": "kermit"}) + self.assertEqual(channel.code, 403, msg=channel.result) + self.assertEqual(channel.json_body["errcode"], "M_USER_DEACTIVATED") + self.assertEqual( + channel.json_body["error"], "This account has been deactivated" + ) + # The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use # RSS256, with a public key configured in synapse as "jwt_secret", and tokens @@ -1121,7 +1137,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str: header = {"alg": "RS256"} if secret.startswith("-----BEGIN RSA PRIVATE KEY-----"): - secret = jwk.dumps(secret, kty="RSA") + secret = JsonWebKey.import_key(secret, {"kty": "RSA"}) result: bytes = jwt.encode(header, payload, secret) return result.decode("ascii") -- cgit 1.5.1 From 1e89976b268c296e1fd8fface36ade29c0354254 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 19 May 2023 08:25:25 -0400 Subject: Rename blacklist/whitelist internally. (#15620) Avoid renaming configuration settings for now and rename internal code to use blocklist and allowlist instead. 
--- changelog.d/15606.misc | 2 +- changelog.d/15620.misc | 1 + synapse/config/repository.py | 8 +- synapse/config/server.py | 24 ++--- synapse/handlers/federation.py | 2 +- synapse/handlers/identity.py | 18 ++-- synapse/handlers/sso.py | 2 +- synapse/http/client.py | 119 ++++++++++----------- synapse/http/federation/matrix_federation_agent.py | 24 ++--- synapse/http/matrixfederationclient.py | 14 +-- synapse/http/proxyagent.py | 2 +- synapse/media/url_previewer.py | 16 +-- synapse/push/httppusher.py | 2 +- synapse/server.py | 10 +- synapse/storage/database.py | 5 +- tests/federation/test_federation_server.py | 2 +- tests/handlers/test_sso.py | 2 +- .../federation/test_matrix_federation_agent.py | 8 +- tests/http/test_client.py | 26 ++--- tests/http/test_matrixfederationclient.py | 16 +-- tests/http/test_proxyagent.py | 18 ++-- tests/http/test_simple_client.py | 18 ++-- tests/push/test_http.py | 2 +- tests/replication/test_pusher_shard.py | 6 +- tests/rest/media/test_url_preview.py | 48 ++++----- 25 files changed, 189 insertions(+), 206 deletions(-) create mode 100644 changelog.d/15620.misc diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc index 44265fbf02..568c0d3fc5 100644 --- a/changelog.d/15606.misc +++ b/changelog.d/15606.misc @@ -1 +1 @@ -Update internal terminology for workers. +Update internal terminology. diff --git a/changelog.d/15620.misc b/changelog.d/15620.misc new file mode 100644 index 0000000000..568c0d3fc5 --- /dev/null +++ b/changelog.d/15620.misc @@ -0,0 +1 @@ +Update internal terminology. diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 655f06505b..f6cfdd3e04 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -224,20 +224,20 @@ class ContentRepositoryConfig(Config): if "http" in proxy_env or "https" in proxy_env: logger.warning("".join(HTTP_PROXY_SET_WARNING)) - # we always blacklist '0.0.0.0' and '::', which are supposed to be + # we always block '0.0.0.0' and '::', which are supposed to be # unroutable addresses. - self.url_preview_ip_range_blacklist = generate_ip_set( + self.url_preview_ip_range_blocklist = generate_ip_set( config["url_preview_ip_range_blacklist"], ["0.0.0.0", "::"], config_path=("url_preview_ip_range_blacklist",), ) - self.url_preview_ip_range_whitelist = generate_ip_set( + self.url_preview_ip_range_allowlist = generate_ip_set( config.get("url_preview_ip_range_whitelist", ()), config_path=("url_preview_ip_range_whitelist",), ) - self.url_preview_url_blacklist = config.get("url_preview_url_blacklist", ()) + self.url_preview_url_blocklist = config.get("url_preview_url_blacklist", ()) self.url_preview_accept_language = config.get( "url_preview_accept_language" diff --git a/synapse/config/server.py b/synapse/config/server.py index 64201238d6..b46fa51593 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -115,7 +115,7 @@ def generate_ip_set( # IP ranges that are considered private / unroutable / don't make sense. -DEFAULT_IP_RANGE_BLACKLIST = [ +DEFAULT_IP_RANGE_BLOCKLIST = [ # Localhost "127.0.0.0/8", # Private networks. 
@@ -501,36 +501,36 @@ class ServerConfig(Config): # due to resource constraints self.admin_contact = config.get("admin_contact", None) - ip_range_blacklist = config.get( - "ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST + ip_range_blocklist = config.get( + "ip_range_blacklist", DEFAULT_IP_RANGE_BLOCKLIST ) # Attempt to create an IPSet from the given ranges - # Always blacklist 0.0.0.0, :: - self.ip_range_blacklist = generate_ip_set( - ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",) + # Always block 0.0.0.0, :: + self.ip_range_blocklist = generate_ip_set( + ip_range_blocklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",) ) - self.ip_range_whitelist = generate_ip_set( + self.ip_range_allowlist = generate_ip_set( config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",) ) # The federation_ip_range_blacklist is used for backwards-compatibility # and only applies to federation and identity servers. if "federation_ip_range_blacklist" in config: - # Always blacklist 0.0.0.0, :: - self.federation_ip_range_blacklist = generate_ip_set( + # Always block 0.0.0.0, :: + self.federation_ip_range_blocklist = generate_ip_set( config["federation_ip_range_blacklist"], ["0.0.0.0", "::"], config_path=("federation_ip_range_blacklist",), ) # 'federation_ip_range_whitelist' was never a supported configuration option. - self.federation_ip_range_whitelist = None + self.federation_ip_range_allowlist = None else: # No backwards-compatiblity requrired, as federation_ip_range_blacklist # is not given. Default to ip_range_blacklist and ip_range_whitelist. - self.federation_ip_range_blacklist = self.ip_range_blacklist - self.federation_ip_range_whitelist = self.ip_range_whitelist + self.federation_ip_range_blocklist = self.ip_range_blocklist + self.federation_ip_range_allowlist = self.ip_range_allowlist # (undocumented) option for torturing the worker-mode replication a bit, # for testing. The value defines the number of milliseconds to pause before diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 19dec4812f..2eb28d55ac 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -148,7 +148,7 @@ class FederationHandler: self._event_auth_handler = hs.get_event_auth_handler() self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self.config = hs.config - self.http_client = hs.get_proxied_blacklisted_http_client() + self.http_client = hs.get_proxied_blocklisted_http_client() self._replication = hs.get_replication_data_handler() self._federation_event_handler = hs.get_federation_event_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index bf0f7acf80..3031384d25 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -52,10 +52,10 @@ class IdentityHandler: # An HTTP client for contacting trusted URLs. self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. 
- self.blacklisting_http_client = SimpleHttpClient( + self._http_client = SimpleHttpClient( hs, - ip_blacklist=hs.config.server.federation_ip_range_blacklist, - ip_whitelist=hs.config.server.federation_ip_range_whitelist, + ip_blocklist=hs.config.server.federation_ip_range_blocklist, + ip_allowlist=hs.config.server.federation_ip_range_allowlist, ) self.federation_http_client = hs.get_federation_http_client() self.hs = hs @@ -197,7 +197,7 @@ class IdentityHandler: try: # Use the blacklisting http client as this call is only to identity servers # provided by a client - data = await self.blacklisting_http_client.post_json_get_json( + data = await self._http_client.post_json_get_json( bind_url, bind_data, headers=headers ) @@ -308,9 +308,7 @@ class IdentityHandler: try: # Use the blacklisting http client as this call is only to identity servers # provided by a client - await self.blacklisting_http_client.post_json_get_json( - url, content, headers - ) + await self._http_client.post_json_get_json(url, content, headers) changed = True except HttpResponseException as e: changed = False @@ -579,7 +577,7 @@ class IdentityHandler: """ # Check what hashing details are supported by this identity server try: - hash_details = await self.blacklisting_http_client.get_json( + hash_details = await self._http_client.get_json( "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), {"access_token": id_access_token}, ) @@ -646,7 +644,7 @@ class IdentityHandler: headers = {"Authorization": create_id_access_token_header(id_access_token)} try: - lookup_results = await self.blacklisting_http_client.post_json_get_json( + lookup_results = await self._http_client.post_json_get_json( "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), { "addresses": [lookup_value], @@ -752,7 +750,7 @@ class IdentityHandler: url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server) try: - data = await self.blacklisting_http_client.post_json_get_json( + data = await self._http_client.post_json_get_json( url, invite_config, {"Authorization": create_id_access_token_header(id_access_token)}, diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 25fd2eb3a1..c3a51722bd 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -204,7 +204,7 @@ class SsoHandler: self._media_repo = ( hs.get_media_repository() if hs.config.media.can_load_media_repo else None ) - self._http_client = hs.get_proxied_blacklisted_http_client() + self._http_client = hs.get_proxied_blocklisted_http_client() # The following template is shown after a successful user interactive # authentication session. It tells the user they can close the window. diff --git a/synapse/http/client.py b/synapse/http/client.py index c9479c81ff..f1ab7a8bc9 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -117,22 +117,22 @@ RawHeaderValue = Union[ ] -def check_against_blacklist( - ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet +def _is_ip_blocked( + ip_address: IPAddress, allowlist: Optional[IPSet], blocklist: IPSet ) -> bool: """ Compares an IP address to allowed and disallowed IP sets. Args: ip_address: The IP address to check - ip_whitelist: Allowed IP addresses. - ip_blacklist: Disallowed IP addresses. + allowlist: Allowed IP addresses. + blocklist: Disallowed IP addresses. Returns: - True if the IP address is in the blacklist and not in the whitelist. + True if the IP address is in the blocklist and not in the allowlist. 
""" - if ip_address in ip_blacklist: - if ip_whitelist is None or ip_address not in ip_whitelist: + if ip_address in blocklist: + if allowlist is None or ip_address not in allowlist: return True return False @@ -154,27 +154,27 @@ def _make_scheduler( return _scheduler -class _IPBlacklistingResolver: +class _IPBlockingResolver: """ - A proxy for reactor.nameResolver which only produces non-blacklisted IP - addresses, preventing DNS rebinding attacks on URL preview. + A proxy for reactor.nameResolver which only produces non-blocklisted IP + addresses, preventing DNS rebinding attacks. """ def __init__( self, reactor: IReactorPluggableNameResolver, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, ): """ Args: reactor: The twisted reactor. - ip_whitelist: IP addresses to allow. - ip_blacklist: IP addresses to disallow. + ip_allowlist: IP addresses to allow. + ip_blocklist: IP addresses to disallow. """ self._reactor = reactor - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist def resolveHostName( self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0 @@ -191,16 +191,13 @@ class _IPBlacklistingResolver: ip_address = IPAddress(address.host) - if check_against_blacklist( - ip_address, self._ip_whitelist, self._ip_blacklist - ): + if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): logger.info( - "Dropped %s from DNS resolution to %s due to blacklist" - % (ip_address, hostname) + "Blocked %s from DNS resolution to %s" % (ip_address, hostname) ) has_bad_ip = True - # if we have a blacklisted IP, we'd like to raise an error to block the + # if we have a blocked IP, we'd like to raise an error to block the # request, but all we can really do from here is claim that there were no # valid results. if not has_bad_ip: @@ -232,24 +229,24 @@ class _IPBlacklistingResolver: # ISynapseReactor implies IReactorCore, but explicitly marking it this as an implementer # of IReactorCore seems to keep mypy-zope happier. @implementer(IReactorCore, ISynapseReactor) -class BlacklistingReactorWrapper: +class BlocklistingReactorWrapper: """ - A Reactor wrapper which will prevent DNS resolution to blacklisted IP + A Reactor wrapper which will prevent DNS resolution to blocked IP addresses, to prevent DNS rebinding. """ def __init__( self, reactor: IReactorPluggableNameResolver, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, ): self._reactor = reactor - # We need to use a DNS resolver which filters out blacklisted IP + # We need to use a DNS resolver which filters out blocked IP # addresses, to prevent DNS rebinding. - self._nameResolver = _IPBlacklistingResolver( - self._reactor, ip_whitelist, ip_blacklist + self._nameResolver = _IPBlockingResolver( + self._reactor, ip_allowlist, ip_blocklist ) def __getattr__(self, attr: str) -> Any: @@ -260,7 +257,7 @@ class BlacklistingReactorWrapper: return getattr(self._reactor, attr) -class BlacklistingAgentWrapper(Agent): +class BlocklistingAgentWrapper(Agent): """ An Agent wrapper which will prevent access to IP addresses being accessed directly (without an IP address lookup). @@ -269,18 +266,18 @@ class BlacklistingAgentWrapper(Agent): def __init__( self, agent: IAgent, - ip_blacklist: IPSet, - ip_whitelist: Optional[IPSet] = None, + ip_blocklist: IPSet, + ip_allowlist: Optional[IPSet] = None, ): """ Args: agent: The Agent to wrap. 
- ip_whitelist: IP addresses to allow. - ip_blacklist: IP addresses to disallow. + ip_allowlist: IP addresses to allow. + ip_blocklist: IP addresses to disallow. """ self._agent = agent - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist def request( self, @@ -299,13 +296,9 @@ class BlacklistingAgentWrapper(Agent): # Not an IP pass else: - if check_against_blacklist( - ip_address, self._ip_whitelist, self._ip_blacklist - ): - logger.info("Blocking access to %s due to blacklist" % (ip_address,)) - e = SynapseError( - HTTPStatus.FORBIDDEN, "IP address blocked by IP blacklist entry" - ) + if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): + logger.info("Blocking access to %s" % (ip_address,)) + e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked") return defer.fail(Failure(e)) return self._agent.request( @@ -763,10 +756,9 @@ class SimpleHttpClient(BaseHttpClient): Args: hs: The HomeServer instance to pass in treq_args: Extra keyword arguments to be given to treq.request. - ip_blacklist: The IP addresses that are blacklisted that - we may not request. - ip_whitelist: The whitelisted IP addresses, that we can - request if it were otherwise caught in a blacklist. + ip_blocklist: The IP addresses that we may not request. + ip_allowlist: The allowed IP addresses, that we can + request if it were otherwise caught in a blocklist. use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. """ @@ -775,19 +767,19 @@ class SimpleHttpClient(BaseHttpClient): self, hs: "HomeServer", treq_args: Optional[Dict[str, Any]] = None, - ip_whitelist: Optional[IPSet] = None, - ip_blacklist: Optional[IPSet] = None, + ip_allowlist: Optional[IPSet] = None, + ip_blocklist: Optional[IPSet] = None, use_proxy: bool = False, ): super().__init__(hs, treq_args=treq_args) - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist - - if self._ip_blacklist: - # If we have an IP blacklist, we need to use a DNS resolver which - # filters out blacklisted IP addresses, to prevent DNS rebinding. - self.reactor: ISynapseReactor = BlacklistingReactorWrapper( - self.reactor, self._ip_whitelist, self._ip_blacklist + self._ip_allowlist = ip_allowlist + self._ip_blocklist = ip_blocklist + + if self._ip_blocklist: + # If we have an IP blocklist, we need to use a DNS resolver which + # filters out blocked IP addresses, to prevent DNS rebinding. + self.reactor: ISynapseReactor = BlocklistingReactorWrapper( + self.reactor, self._ip_allowlist, self._ip_blocklist ) # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to @@ -809,14 +801,13 @@ class SimpleHttpClient(BaseHttpClient): use_proxy=use_proxy, ) - if self._ip_blacklist: - # If we have an IP blacklist, we then install the blacklisting Agent - # which prevents direct access to IP addresses, that are not caught - # by the DNS resolution. - self.agent = BlacklistingAgentWrapper( + if self._ip_blocklist: + # If we have an IP blocklist, we then install the Agent which prevents + # direct access to IP addresses, that are not caught by the DNS resolution. 
+ self.agent = BlocklistingAgentWrapper( self.agent, - ip_blacklist=self._ip_blacklist, - ip_whitelist=self._ip_whitelist, + ip_blocklist=self._ip_blocklist, + ip_allowlist=self._ip_allowlist, ) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 8d7d0a3875..7e8cf31682 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -36,7 +36,7 @@ from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResp from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http import proxyagent -from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper +from synapse.http.client import BlocklistingAgentWrapper, BlocklistingReactorWrapper from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.http.federation.well_known_resolver import WellKnownResolver @@ -65,12 +65,12 @@ class MatrixFederationAgent: user_agent: The user agent header to use for federation requests. - ip_whitelist: Allowed IP addresses. + ip_allowlist: Allowed IP addresses. - ip_blacklist: Disallowed IP addresses. + ip_blocklist: Disallowed IP addresses. proxy_reactor: twisted reactor to use for connections to the proxy server - reactor might have some blacklisting applied (i.e. for DNS queries), + reactor might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. _srv_resolver: @@ -87,17 +87,17 @@ class MatrixFederationAgent: reactor: ISynapseReactor, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, - ip_whitelist: Optional[IPSet], - ip_blacklist: IPSet, + ip_allowlist: Optional[IPSet], + ip_blocklist: IPSet, _srv_resolver: Optional[SrvResolver] = None, _well_known_resolver: Optional[WellKnownResolver] = None, ): - # proxy_reactor is not blacklisted + # proxy_reactor is not blocklisting reactor proxy_reactor = reactor - # We need to use a DNS resolver which filters out blacklisted IP + # We need to use a DNS resolver which filters out blocked IP # addresses, to prevent DNS rebinding. - reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist) + reactor = BlocklistingReactorWrapper(reactor, ip_allowlist, ip_blocklist) self._clock = Clock(reactor) self._pool = HTTPConnectionPool(reactor) @@ -120,7 +120,7 @@ class MatrixFederationAgent: if _well_known_resolver is None: _well_known_resolver = WellKnownResolver( reactor, - agent=BlacklistingAgentWrapper( + agent=BlocklistingAgentWrapper( ProxyAgent( reactor, proxy_reactor, @@ -128,7 +128,7 @@ class MatrixFederationAgent: contextFactory=tls_client_options_factory, use_proxy=True, ), - ip_blacklist=ip_blacklist, + ip_blocklist=ip_blocklist, ), user_agent=self.user_agent, ) @@ -256,7 +256,7 @@ class MatrixHostnameEndpoint: Args: reactor: twisted reactor to use for underlying requests proxy_reactor: twisted reactor to use for connections to the proxy server. - 'reactor' might have some blacklisting applied (i.e. for DNS queries), + 'reactor' might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. tls_client_options_factory: factory to use for fetching client tls options, or none to disable TLS. 
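Despite the renames above, the core check is unchanged: an address is rejected only when it falls in the blocklist and is not rescued by the allowlist. A short sketch of that rule with `netaddr` (the library behind the `IPSet`s used here); the concrete ranges are made up for illustration:

    from netaddr import IPAddress, IPSet

    blocklist = IPSet(["10.0.0.0/8"])
    allowlist = IPSet(["10.1.2.3"])

    def is_blocked(ip: str) -> bool:
        # Mirrors _is_ip_blocked: blocked unless explicitly allowlisted.
        address = IPAddress(ip)
        return address in blocklist and address not in allowlist

    assert is_blocked("10.99.0.1")      # caught by the blocklist
    assert not is_blocked("10.1.2.3")   # the allowlist wins
    assert not is_blocked("8.8.8.8")    # never blocked in the first place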
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 634882487c..9094dab0fe 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -64,7 +64,7 @@ from synapse.api.errors import ( from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http import QuieterFileBodyProducer from synapse.http.client import ( - BlacklistingAgentWrapper, + BlocklistingAgentWrapper, BodyExceededMaxSize, ByteWriteable, _make_scheduler, @@ -392,15 +392,15 @@ class MatrixFederationHttpClient: self.reactor, tls_client_options_factory, user_agent.encode("ascii"), - hs.config.server.federation_ip_range_whitelist, - hs.config.server.federation_ip_range_blacklist, + hs.config.server.federation_ip_range_allowlist, + hs.config.server.federation_ip_range_blocklist, ) - # Use a BlacklistingAgentWrapper to prevent circumventing the IP - # blacklist via IP literals in server names - self.agent = BlacklistingAgentWrapper( + # Use a BlocklistingAgentWrapper to prevent circumventing the IP + # blocking via IP literals in server names + self.agent = BlocklistingAgentWrapper( federation_agent, - ip_blacklist=hs.config.server.federation_ip_range_blacklist, + ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) self.clock = hs.get_clock() diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 94ef737b9e..7bdc4acae7 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -53,7 +53,7 @@ class ProxyAgent(_AgentBase): connections. proxy_reactor: twisted reactor to use for connections to the proxy server - reactor might have some blacklisting applied (i.e. for DNS queries), + reactor might have some blocking applied (i.e. for DNS queries), but we need unblocked access to the proxy. contextFactory: A factory for TLS contexts, to control the diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index dbdb1fd20e..70b32cee17 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -105,7 +105,7 @@ class UrlPreviewer: When Synapse is asked to preview a URL it does the following: - 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the + 1. Checks against a URL blocklist (defined as `url_preview_url_blacklist` in the config). 2. Checks the URL against an in-memory cache and returns the result if it exists. (This is also used to de-duplicate processing of multiple in-flight requests at once.) @@ -167,8 +167,8 @@ class UrlPreviewer: self.client = SimpleHttpClient( hs, treq_args={"browser_like_redirects": True}, - ip_whitelist=hs.config.media.url_preview_ip_range_whitelist, - ip_blacklist=hs.config.media.url_preview_ip_range_blacklist, + ip_allowlist=hs.config.media.url_preview_ip_range_allowlist, + ip_blocklist=hs.config.media.url_preview_ip_range_blocklist, use_proxy=True, ) self.media_repo = media_repo @@ -186,7 +186,7 @@ class UrlPreviewer: or instance_running_jobs == hs.get_instance_name() ) - self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist + self.url_preview_url_blocklist = hs.config.media.url_preview_url_blocklist self.url_preview_accept_language = hs.config.media.url_preview_accept_language # memory cache mapping urls to an ObservableDeferred returning @@ -391,7 +391,7 @@ class UrlPreviewer: True if the URL is blocked, False if it is allowed. 
""" url_tuple = urlsplit(url) - for entry in self.url_preview_url_blacklist: + for entry in self.url_preview_url_blocklist: match = True # Iterate over each entry. If *all* attributes of that entry match # the current URL, then reject it. @@ -426,7 +426,7 @@ class UrlPreviewer: # All fields matched, return true (the URL is blocked). if match: - logger.warning("URL %s blocked by url_blacklist entry %s", url, entry) + logger.warning("URL %s blocked by entry %s", url, entry) return match # No matches were found, the URL is allowed. @@ -472,7 +472,7 @@ class UrlPreviewer: except DNSLookupError: # DNS lookup returned no results # Note: This will also be the case if one of the resolved IP - # addresses is blacklisted + # addresses is blocked. raise SynapseError( 502, "DNS resolution failure during URL preview generation", @@ -575,7 +575,7 @@ class UrlPreviewer: if self._is_url_blocked(url): raise SynapseError( - 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN + 403, "URL blocked by url pattern blocklist entry", Codes.UNKNOWN ) # TODO: we should probably honour robots.txt... except in practice diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index e91ee05e99..50027680cb 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -143,7 +143,7 @@ class HttpPusher(Pusher): ) self.url = url - self.http_client = hs.get_proxied_blacklisted_http_client() + self.http_client = hs.get_proxied_blocklisted_http_client() self.data_minus_url = {} self.data_minus_url.update(self.data) del self.data_minus_url["url"] diff --git a/synapse/server.py b/synapse/server.py index aa90465047..f6e245569c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -454,15 +454,15 @@ class HomeServer(metaclass=abc.ABCMeta): return SimpleHttpClient(self, use_proxy=True) @cache_in_self - def get_proxied_blacklisted_http_client(self) -> SimpleHttpClient: + def get_proxied_blocklisted_http_client(self) -> SimpleHttpClient: """ - An HTTP client that uses configured HTTP(S) proxies and blacklists IPs - based on the IP range blacklist/whitelist. + An HTTP client that uses configured HTTP(S) proxies and blocks IPs + based on the configured IP ranges. """ return SimpleHttpClient( self, - ip_whitelist=self.config.server.ip_range_whitelist, - ip_blacklist=self.config.server.ip_range_blacklist, + ip_allowlist=self.config.server.ip_range_allowlist, + ip_blocklist=self.config.server.ip_range_blocklist, use_proxy=True, ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 313cf1a8d0..bdaa508dbe 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -565,9 +565,8 @@ class DatabasePool: # A set of tables that are not safe to use native upserts in. self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys()) - # We add the user_directory_search table to the blacklist on SQLite - # because the existing search table does not have an index, making it - # unsafe to use native upserts. + # The user_directory_search table is unsafe to use native upserts + # on SQLite because the existing search table does not have an index. 
if isinstance(self.engine, Sqlite3Engine): self._unsafe_to_upsert_tables.add("user_directory_search") diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 6c7738d810..5c850d1843 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -63,7 +63,7 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): class ServerACLsTestCase(unittest.TestCase): - def test_blacklisted_server(self) -> None: + def test_blocked_server(self) -> None: e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) logging.info("ACL event: %s", e.content) diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index 620ae3a4ba..b9ffdb4ced 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -31,7 +31,7 @@ class TestSSOHandler(unittest.HomeserverTestCase): self.http_client.get_file.side_effect = mock_get_file self.http_client.user_agent = b"Synapse Test" hs = self.setup_test_homeserver( - proxied_blacklisted_http_client=self.http_client + proxied_blocklisted_http_client=self.http_client ) return hs diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index eb7f53fee5..105b4caefa 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -269,8 +269,8 @@ class MatrixFederationAgentTests(unittest.TestCase): reactor=cast(ISynapseReactor, self.reactor), tls_client_options_factory=self.tls_factory, user_agent=b"test-agent", # Note that this is unused since _well_known_resolver is provided. - ip_whitelist=IPSet(), - ip_blacklist=IPSet(), + ip_allowlist=IPSet(), + ip_blocklist=IPSet(), _srv_resolver=self.mock_resolver, _well_known_resolver=self.well_known_resolver, ) @@ -997,8 +997,8 @@ class MatrixFederationAgentTests(unittest.TestCase): reactor=self.reactor, tls_client_options_factory=tls_factory, user_agent=b"test-agent", # This is unused since _well_known_resolver is passed below. 
- ip_whitelist=IPSet(), - ip_blacklist=IPSet(), + ip_allowlist=IPSet(), + ip_blocklist=IPSet(), _srv_resolver=self.mock_resolver, _well_known_resolver=WellKnownResolver( cast(ISynapseReactor, self.reactor), diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 57b6a84e23..a05b9f17a6 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -27,8 +27,8 @@ from twisted.web.iweb import UNKNOWN_LENGTH from synapse.api.errors import SynapseError from synapse.http.client import ( - BlacklistingAgentWrapper, - BlacklistingReactorWrapper, + BlocklistingAgentWrapper, + BlocklistingReactorWrapper, BodyExceededMaxSize, _DiscardBodyWithMaxSizeProtocol, read_body_with_max_size, @@ -140,7 +140,7 @@ class ReadBodyWithMaxSizeTests(TestCase): self.assertEqual(result.getvalue(), b"") -class BlacklistingAgentTest(TestCase): +class BlocklistingAgentTest(TestCase): def setUp(self) -> None: self.reactor, self.clock = get_clock() @@ -157,16 +157,16 @@ class BlacklistingAgentTest(TestCase): self.reactor.lookups[domain.decode()] = ip.decode() self.reactor.lookups[ip.decode()] = ip.decode() - self.ip_whitelist = IPSet([self.allowed_ip.decode()]) - self.ip_blacklist = IPSet(["5.0.0.0/8"]) + self.ip_allowlist = IPSet([self.allowed_ip.decode()]) + self.ip_blocklist = IPSet(["5.0.0.0/8"]) def test_reactor(self) -> None: - """Apply the blacklisting reactor and ensure it properly blocks connections to particular domains and IPs.""" + """Apply the blocklisting reactor and ensure it properly blocks connections to particular domains and IPs.""" agent = Agent( - BlacklistingReactorWrapper( + BlocklistingReactorWrapper( self.reactor, - ip_whitelist=self.ip_whitelist, - ip_blacklist=self.ip_blacklist, + ip_allowlist=self.ip_allowlist, + ip_blocklist=self.ip_blocklist, ), ) @@ -207,11 +207,11 @@ class BlacklistingAgentTest(TestCase): self.assertEqual(response.code, 200) def test_agent(self) -> None: - """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs.""" - agent = BlacklistingAgentWrapper( + """Apply the blocklisting agent and ensure it properly blocks connections to particular IPs.""" + agent = BlocklistingAgentWrapper( Agent(self.reactor), - ip_blacklist=self.ip_blacklist, - ip_whitelist=self.ip_whitelist, + ip_blocklist=self.ip_blocklist, + ip_allowlist=self.ip_allowlist, ) # The unsafe IPs should be rejected. 
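The tests above and below exercise the two blocking layers separately: `BlocklistingReactorWrapper` filters addresses as they come back from DNS (closing the DNS-rebinding hole), while `BlocklistingAgentWrapper` rejects literal IPs that never go through a lookup at all. A rough sketch of the resolver-side filtering, again assuming a `netaddr` blocklist:

    from netaddr import IPAddress, IPSet

    blocklist = IPSet(["192.168.0.0/16"])

    def filter_resolved(addresses: list) -> list:
        # Drop resolved addresses that fall in the blocklist. If nothing
        # survives, the caller reports "no results", which aborts the
        # request, mirroring _IPBlockingResolver's behaviour.
        return [a for a in addresses if IPAddress(a) not in blocklist]

    assert filter_resolved(["192.168.1.5", "1.2.3.4"]) == ["1.2.3.4"]
    assert filter_resolved(["192.168.1.5"]) == []  # request gets blocked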
diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index d89a91c59d..0dfc03ce50 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -231,11 +231,11 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived) - def test_client_ip_range_blacklist(self) -> None: - """Ensure that Synapse does not try to connect to blacklisted IPs""" + def test_client_ip_range_blocklist(self) -> None: + """Ensure that Synapse does not try to connect to blocked IPs""" - # Set up the ip_range blacklist - self.hs.config.server.federation_ip_range_blacklist = IPSet( + # Set up the ip_range blocklist + self.hs.config.server.federation_ip_range_blocklist = IPSet( ["127.0.0.0/8", "fe80::/64"] ) self.reactor.lookups["internal"] = "127.0.0.1" @@ -243,7 +243,7 @@ class FederationClientTests(HomeserverTestCase): self.reactor.lookups["fine"] = "10.20.30.40" cl = MatrixFederationHttpClient(self.hs, None) - # Try making a GET request to a blacklisted IPv4 address + # Try making a GET request to a blocked IPv4 address # ------------------------------------------------------ # Make the request d = defer.ensureDeferred(cl.get_json("internal:8008", "foo/bar", timeout=10000)) @@ -261,7 +261,7 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, DNSLookupError) - # Try making a POST request to a blacklisted IPv6 address + # Try making a POST request to a blocked IPv6 address # ------------------------------------------------------- # Make the request d = defer.ensureDeferred( @@ -278,11 +278,11 @@ class FederationClientTests(HomeserverTestCase): clients = self.reactor.tcpClients self.assertEqual(len(clients), 0) - # Check that it was due to a blacklisted DNS lookup + # Check that it was due to a blocked DNS lookup f = self.failureResultOf(d, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, DNSLookupError) - # Try making a GET request to a non-blacklisted IPv4 address + # Try making a GET request to an allowed IPv4 address # ---------------------------------------------------------- # Make the request d = defer.ensureDeferred(cl.post_json("fine:8008", "foo/bar", timeout=10000)) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index cc175052ac..e0ae5a88ff 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -32,7 +32,7 @@ from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel -from synapse.http.client import BlacklistingReactorWrapper +from synapse.http.client import BlocklistingReactorWrapper from synapse.http.connectproxyclient import ProxyCredentials from synapse.http.proxyagent import ProxyAgent, parse_proxy @@ -684,11 +684,11 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(body, b"result") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) - def test_http_request_via_proxy_with_blacklist(self) -> None: - # The blacklist includes the configured proxy IP. + def test_http_request_via_proxy_with_blocklist(self) -> None: + # The blocklist includes the configured proxy IP. 
agent = ProxyAgent( - BlacklistingReactorWrapper( - self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"]) + BlocklistingReactorWrapper( + self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"]) ), self.reactor, use_proxy=True, @@ -730,11 +730,11 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(body, b"result") @patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"}) - def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: - # The blacklist includes the configured proxy IP. + def test_https_request_via_uppercase_proxy_with_blocklist(self) -> None: + # The blocklist includes the configured proxy IP. agent = ProxyAgent( - BlacklistingReactorWrapper( - self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"]) + BlocklistingReactorWrapper( + self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"]) ), self.reactor, contextFactory=get_test_https_policy(), diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py index 010601da4b..be731645bf 100644 --- a/tests/http/test_simple_client.py +++ b/tests/http/test_simple_client.py @@ -123,17 +123,17 @@ class SimpleHttpClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestTimedOutError) - def test_client_ip_range_blacklist(self) -> None: - """Ensure that Synapse does not try to connect to blacklisted IPs""" + def test_client_ip_range_blocklist(self) -> None: + """Ensure that Synapse does not try to connect to blocked IPs""" - # Add some DNS entries we'll blacklist + # Add some DNS entries we'll block self.reactor.lookups["internal"] = "127.0.0.1" self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" - ip_blacklist = IPSet(["127.0.0.0/8", "fe80::/64"]) + ip_blocklist = IPSet(["127.0.0.0/8", "fe80::/64"]) - cl = SimpleHttpClient(self.hs, ip_blacklist=ip_blacklist) + cl = SimpleHttpClient(self.hs, ip_blocklist=ip_blocklist) - # Try making a GET request to a blacklisted IPv4 address + # Try making a GET request to a blocked IPv4 address # ------------------------------------------------------ # Make the request d = defer.ensureDeferred(cl.get_json("http://internal:8008/foo/bar")) @@ -145,7 +145,7 @@ class SimpleHttpClientTests(HomeserverTestCase): self.failureResultOf(d, DNSLookupError) - # Try making a POST request to a blacklisted IPv6 address + # Try making a POST request to a blocked IPv6 address # ------------------------------------------------------- # Make the request d = defer.ensureDeferred( @@ -159,10 +159,10 @@ class SimpleHttpClientTests(HomeserverTestCase): clients = self.reactor.tcpClients self.assertEqual(len(clients), 0) - # Check that it was due to a blacklisted DNS lookup + # Check that it was due to a blocked DNS lookup self.failureResultOf(d, DNSLookupError) - # Try making a GET request to a non-blacklisted IPv4 address + # Try making a GET request to a non-blocked IPv4 address # ---------------------------------------------------------- # Make the request d = defer.ensureDeferred(cl.get_json("http://testserv:8008/foo/bar")) diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 54f558742d..e68a979ee0 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -52,7 +52,7 @@ class HTTPPusherTests(HomeserverTestCase): m.post_json_get_json = post_json_get_json - hs = self.setup_test_homeserver(proxied_blacklisted_http_client=m) + hs = self.setup_test_homeserver(proxied_blocklisted_http_client=m) return hs diff --git a/tests/replication/test_pusher_shard.py 
b/tests/replication/test_pusher_shard.py index dcb3e6669b..875811669c 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -93,7 +93,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): self.make_worker_hs( "synapse.app.generic_worker", {"worker_name": "pusher1", "pusher_instances": ["pusher1"]}, - proxied_blacklisted_http_client=http_client_mock, + proxied_blocklisted_http_client=http_client_mock, ) event_id = self._create_pusher_and_send_msg("user") @@ -126,7 +126,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): "worker_name": "pusher1", "pusher_instances": ["pusher1", "pusher2"], }, - proxied_blacklisted_http_client=http_client_mock1, + proxied_blocklisted_http_client=http_client_mock1, ) http_client_mock2 = Mock(spec_set=["post_json_get_json"]) @@ -140,7 +140,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): "worker_name": "pusher2", "pusher_instances": ["pusher1", "pusher2"], }, - proxied_blacklisted_http_client=http_client_mock2, + proxied_blocklisted_http_client=http_client_mock2, ) # We choose a user name that we know should go to pusher1. diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 7517155cf3..170fb0534a 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -418,9 +418,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_specific(self) -> None: + def test_blocked_ip_specific(self) -> None: """ - Blacklisted IP addresses, found via DNS, are not spidered. + Blocked IP addresses, found via DNS, are not spidered. """ self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] @@ -439,9 +439,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_range(self) -> None: + def test_blocked_ip_range(self) -> None: """ - Blacklisted IP ranges, IPs found over DNS, are not spidered. + Blocked IP ranges, IPs found over DNS, are not spidered. """ self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")] @@ -458,9 +458,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ip_specific_direct(self) -> None: + def test_blocked_ip_specific_direct(self) -> None: """ - Blacklisted IP addresses, accessed directly, are not spidered. + Blocked IP addresses, accessed directly, are not spidered. """ channel = self.make_request( "GET", "preview_url?url=http://192.168.1.1", shorthand=False @@ -470,16 +470,13 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(len(self.reactor.tcpClients), 0) self.assertEqual( channel.json_body, - { - "errcode": "M_UNKNOWN", - "error": "IP address blocked by IP blacklist entry", - }, + {"errcode": "M_UNKNOWN", "error": "IP address blocked"}, ) self.assertEqual(channel.code, 403) - def test_blacklisted_ip_range_direct(self) -> None: + def test_blocked_ip_range_direct(self) -> None: """ - Blacklisted IP ranges, accessed directly, are not spidered. + Blocked IP ranges, accessed directly, are not spidered. 
""" channel = self.make_request( "GET", "preview_url?url=http://1.1.1.2", shorthand=False @@ -488,15 +485,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, 403) self.assertEqual( channel.json_body, - { - "errcode": "M_UNKNOWN", - "error": "IP address blocked by IP blacklist entry", - }, + {"errcode": "M_UNKNOWN", "error": "IP address blocked"}, ) - def test_blacklisted_ip_range_whitelisted_ip(self) -> None: + def test_blocked_ip_range_whitelisted_ip(self) -> None: """ - Blacklisted but then subsequently whitelisted IP addresses can be + Blocked but then subsequently whitelisted IP addresses can be spidered. """ self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")] @@ -527,10 +521,10 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_with_external_ip(self) -> None: + def test_blocked_ip_with_external_ip(self) -> None: """ - If a hostname resolves a blacklisted IP, even if there's a - non-blacklisted one, it will be rejected. + If a hostname resolves a blocked IP, even if there's a non-blocked one, + it will be rejected. """ # Hardcode the URL resolving to the IP we want. self.lookups["example.com"] = [ @@ -550,9 +544,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_specific(self) -> None: + def test_blocked_ipv6_specific(self) -> None: """ - Blacklisted IP addresses, found via DNS, are not spidered. + Blocked IP addresses, found via DNS, are not spidered. """ self.lookups["example.com"] = [ (IPv6Address, "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") @@ -573,9 +567,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): }, ) - def test_blacklisted_ipv6_range(self) -> None: + def test_blocked_ipv6_range(self) -> None: """ - Blacklisted IP ranges, IPs found over DNS, are not spidered. + Blocked IP ranges, IPs found over DNS, are not spidered. """ self.lookups["example.com"] = [(IPv6Address, "2001:800::1")] @@ -1359,7 +1353,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): @unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]}) def test_blocked_port(self) -> None: - """Tests that blacklisting URLs with a port makes previewing such URLs + """Tests that blocking URLs with a port makes previewing such URLs fail with a 403 error and doesn't impact other previews. """ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] @@ -1401,7 +1395,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): {"url_preview_url_blacklist": [{"netloc": "example.com"}]} ) def test_blocked_url(self) -> None: - """Tests that blacklisting URLs with a host makes previewing such URLs + """Tests that blocking URLs with a host makes previewing such URLs fail with a 403 error. """ self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")] -- cgit 1.5.1 From 736199b7638175c439fff10a1f8a2d7da96838e5 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 19 May 2023 16:13:44 +0000 Subject: Remove old R30 because R30v2 supercedes it (#10428) R30v2 has been out since 2021-07-19 (https://github.com/matrix-org/synapse/pull/10332) and we started collecting stats on 2021-08-16. Since it's been over a year now (almost 2 years), this is enough grace period for us to now rip it out. 
--- changelog.d/10428.removal | 1 + .../reporting_homeserver_usage_statistics.md | 5 - synapse/app/phone_stats_home.py | 4 - synapse/storage/databases/main/metrics.py | 83 ----------- tests/app/test_phone_stats_home.py | 154 --------------------- 5 files changed, 1 insertion(+), 246 deletions(-) create mode 100644 changelog.d/10428.removal diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal new file mode 100644 index 0000000000..c056e89585 --- /dev/null +++ b/changelog.d/10428.removal @@ -0,0 +1 @@ +Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md index 3a7ed7c806..60b758e33b 100644 --- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md +++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md @@ -42,11 +42,6 @@ The following statistics are sent to the configured reporting endpoint: | `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. | | `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. | | `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. | -| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. | -| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. | -| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. | -| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. | -| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. | | `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. | | `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. | | `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. 
| diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 897dd3edac..09988670da 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -127,10 +127,6 @@ async def phone_stats_home( daily_sent_messages = await store.count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages - r30_results = await store.count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - r30v2_results = await store.count_r30v2_users() for name, count in r30v2_results.items(): stats["r30v2_users_" + name] = count diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 14294a0bb8..595e22982e 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -248,89 +248,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): (count,) = cast(Tuple[int], txn.fetchone()) return count - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. - """ - - def _count_r30_users(txn: LoggingTransaction) -> Dict[str, int]: - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COUNT(*) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COUNT(*) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? 
- AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = cast(Tuple[int], txn.fetchone()) - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - async def count_r30v2_users(self) -> Dict[str, int]: """ Counts the number of 30 day retained users, defined as users that: diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py index a860eedbcf..9305b758d7 100644 --- a/tests/app/test_phone_stats_home.py +++ b/tests/app/test_phone_stats_home.py @@ -4,7 +4,6 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.util import Clock -from tests import unittest from tests.server import ThreadedMemoryReactorClock from tests.unittest import HomeserverTestCase @@ -12,154 +11,6 @@ FIVE_MINUTES_IN_SECONDS = 300 ONE_DAY_IN_SECONDS = 86400 -class PhoneHomeTestCase(HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets_for_client_rest_resource, - room.register_servlets, - login.register_servlets, - ] - - # Override the retention time for the user_ips table because otherwise it - # gets pruned too aggressively for our R30 test. - @unittest.override_config({"user_ips_max_age": "365d"}) - def test_r30_minimum_usage(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 29 days. The user has now not posted for 29 days. - self.reactor.advance(29 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 30 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_minimum_usage_using_default_config(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. 
- - N.B. This test does not override the `user_ips_max_age` config setting, - which defaults to 28 days. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 27 days. The user has now not posted for 27 days. - self.reactor.advance(27 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 28 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. - # (This is because the user_ips table has been pruned, which by default - # only preserves the last 28 days of entries.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_user_must_be_retained_for_at_least_a_month(self) -> None: - """ - Tests that a newly-registered user must be retained for a whole month - before appearing in the R30 statistic, even if they post every day - during that time! - """ - # Register a user and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the user does not contribute to R30 yet. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - for _ in range(30): - # This loop posts a message every day for 30 days - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "I'm still here", tok=access_token) - - # Notice that the user *still* does not contribute to R30! - r30_results = self.get_success( - self.hs.get_datastores().main.count_r30_users() - ) - self.assertEqual(r30_results, {"all": 0}) - - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "Still here!", tok=access_token) - - # *Now* the user appears in R30. 
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - class PhoneHomeR30V2TestCase(HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -363,11 +214,6 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase): r30_results, {"all": 0, "android": 0, "electron": 0, "ios": 0, "web": 0} ) - # Check that this is a situation where old R30 differs: - # old R30 DOES count this as 'retained'. - r30_results = self.get_success(store.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "ios": 1}) - # Now we want to check that the user will still be able to appear in # R30v2 as long as the user performs some other activity between # 30 and 60 days later. -- cgit 1.5.1 From ca3c07e833816e69bbaf0372e6cc79f52e6db88e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 19 May 2023 11:18:45 -0500 Subject: Trace how many new events from the backfill response we need to process (#15633) You can kinda derive this information from how many `_process_pulled_event` spans there are but it would be nice to quickly glance. --- changelog.d/15633.misc | 1 + synapse/handlers/federation_event.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/15633.misc diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc new file mode 100644 index 0000000000..4126a20602 --- /dev/null +++ b/changelog.d/15633.misc @@ -0,0 +1 @@ +Trace how many new events from the backfill response we need to process. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 06343d40e4..9a08618da5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -890,6 +890,11 @@ class FederationEventHandler: # Continue on with the events that are new to us. new_events.append(event) + set_tag( + SynapseTags.RESULT_PREFIX + "new_events.length", + str(len(new_events)), + ) + # We want to sort these by depth so we process them and # tell clients about them in order. sorted_events = sorted(new_events, key=lambda x: x.depth) -- cgit 1.5.1 From 703a8f9c67cfe25b956dfdcca654818d52fa7ebd Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 19 May 2023 12:26:58 -0500 Subject: Instrument `state` and `state_group` storage related things (tracing) (#15610) Instrument `state` and `state_group` storage related things (tracing) so it's a little more clear where these database transactions are coming from as there is a lot of wires crossing in these functions. Part of `/messages` performance investigation: https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/15610.misc | 1 + synapse/events/snapshot.py | 5 ++++ synapse/state/__init__.py | 4 ++++ synapse/storage/controllers/state.py | 33 +++++++++++++++++++++++++++ synapse/storage/databases/state/bg_updates.py | 5 ++++ synapse/storage/databases/state/store.py | 15 ++++++++++++ 6 files changed, 63 insertions(+) create mode 100644 changelog.d/15610.misc diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15610.misc @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. 
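The `@trace` and `@tag_args` decorators applied throughout the diffs below come from `synapse.logging.opentracing`. Conceptually they behave like the following simplified, synchronous-only sketch; the `trace_and_tag_args` helper is a hypothetical stand-in (assuming the `opentracing` package), not Synapse's implementation, which additionally handles async functions and uses Synapse-specific tag prefixes:

```python
import functools

import opentracing  # the global tracer is a no-op unless one is installed


def trace_and_tag_args(func):
    """Sketch: run ``func`` inside a tracing span named after it (what
    @trace does), and tag the span with the call's positional arguments
    (what @tag_args does)."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with opentracing.tracer.start_active_span(func.__qualname__) as scope:
            for i, arg in enumerate(args):
                scope.span.set_tag(f"arg{i}", str(arg))
            return func(*args, **kwargs)

    return wrapper
```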
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 9b4d692cf4..e7e8225b8e 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,6 +19,7 @@ from immutabledict import immutabledict from synapse.appservice import ApplicationService from synapse.events import EventBase +from synapse.logging.opentracing import tag_args, trace from synapse.types import JsonDict, StateMap if TYPE_CHECKING: @@ -242,6 +243,8 @@ class EventContext(UnpersistedEventContextBase): return self._state_group + @trace + @tag_args async def get_current_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> Optional[StateMap[str]]: @@ -275,6 +278,8 @@ class EventContext(UnpersistedEventContextBase): return prev_state_ids + @trace + @tag_args async def get_prev_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> StateMap[str]: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6031095249..9bc0c3b7b9 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -45,6 +45,7 @@ from synapse.events.snapshot import ( UnpersistedEventContextBase, ) from synapse.logging.context import ContextResourceUsage +from synapse.logging.opentracing import tag_args, trace from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour @@ -270,6 +271,8 @@ class StateHandler: state = await entry.get_state(self._state_storage_controller, StateFilter.all()) return await self.store.get_joined_hosts(room_id, state, entry) + @trace + @tag_args async def calculate_context_info( self, event: EventBase, @@ -465,6 +468,7 @@ class StateHandler: return await unpersisted_context.persist(event) + @trace @measure_func() async def resolve_state_groups_for_events( self, room_id: str, event_ids: Collection[str], await_full_state: bool = True diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 9d7a8a792f..06a80869eb 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -67,6 +67,8 @@ class StateStorageController: """ self._partial_state_room_tracker.notify_un_partial_stated(room_id) + @trace + @tag_args async def get_state_group_delta( self, state_group: int ) -> Tuple[Optional[int], Optional[StateMap[str]]]: @@ -84,6 +86,8 @@ class StateStorageController: state_group_delta = await self.stores.state.get_state_group_delta(state_group) return state_group_delta.prev_group, state_group_delta.delta_ids + @trace + @tag_args async def get_state_groups_ids( self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True ) -> Dict[int, MutableStateMap[str]]: @@ -114,6 +118,8 @@ class StateStorageController: return group_to_state + @trace + @tag_args async def get_state_ids_for_group( self, state_group: int, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: @@ -130,6 +136,8 @@ class StateStorageController: return group_to_state[state_group] + @trace + @tag_args async def get_state_groups( self, room_id: str, event_ids: Collection[str] ) -> Dict[int, List[EventBase]]: @@ -165,6 +173,8 @@ class StateStorageController: for group, event_id_map in group_to_ids.items() } + @trace + @tag_args def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ) -> Awaitable[Dict[int, StateMap[str]]]: @@ -183,6 +193,7 @@ class StateStorageController: return self.stores.state._get_state_groups_from_groups(groups, 
state_filter) @trace + @tag_args async def get_state_for_events( self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: @@ -280,6 +291,8 @@ class StateStorageController: return {event: event_to_state[event] for event in event_ids} + @trace + @tag_args async def get_state_for_event( self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -303,6 +316,7 @@ class StateStorageController: return state_map[event_id] @trace + @tag_args async def get_state_ids_for_event( self, event_id: str, @@ -333,6 +347,8 @@ class StateStorageController: ) return state_map[event_id] + @trace + @tag_args def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Awaitable[Dict[int, MutableStateMap[str]]]: @@ -402,6 +418,8 @@ class StateStorageController: event_id, room_id, prev_group, delta_ids, current_state_ids ) + @trace + @tag_args @cancellable async def get_current_state_ids( self, @@ -442,6 +460,8 @@ class StateStorageController: room_id, on_invalidate=on_invalidate ) + @trace + @tag_args async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]: """Get canonical alias for room, if any @@ -466,6 +486,8 @@ class StateStorageController: return event.content.get("canonical_alias") + @trace + @tag_args async def get_current_state_deltas( self, prev_stream_id: int, max_stream_id: int ) -> Tuple[int, List[Dict[str, Any]]]: @@ -500,6 +522,7 @@ class StateStorageController: ) @trace + @tag_args async def get_current_state( self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -516,6 +539,8 @@ class StateStorageController: return state_map + @trace + @tag_args async def get_current_state_event( self, room_id: str, event_type: str, state_key: str ) -> Optional[EventBase]: @@ -527,6 +552,8 @@ class StateStorageController: ) return state_map.get(key) + @trace + @tag_args async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state. @@ -538,6 +565,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room(room_id) + @trace + @tag_args async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]: """Get current hosts in room based on current state. @@ -553,6 +582,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room_ordered(room_id) + @trace + @tag_args async def get_current_hosts_in_room_or_partial_state_approximation( self, room_id: str ) -> Collection[str]: @@ -582,6 +613,8 @@ class StateStorageController: return hosts + @trace + @tag_args async def get_users_in_room_with_profiles( self, room_id: str ) -> Mapping[str, ProfileInfo]: diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 097dea5182..86eb1a8a08 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -40,6 +41,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): updates. 
""" + @trace + @tag_args def _count_state_group_hops_txn( self, txn: LoggingTransaction, state_group: int ) -> int: @@ -83,6 +86,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): return count + @trace + @tag_args def _get_state_groups_from_groups_txn( self, txn: LoggingTransaction, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 29ff64e876..6984d11352 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -20,6 +20,7 @@ import attr from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -159,6 +160,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): "get_state_group_delta", _get_state_group_delta_txn ) + @trace + @tag_args @cancellable async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter @@ -187,6 +190,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return results + @trace + @tag_args def _get_state_for_group_using_cache( self, cache: DictionaryCache[int, StateKey, str], @@ -239,6 +244,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state_filter.filter_state(state_dict_ids), not missing_types + @trace + @tag_args @cancellable async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None @@ -305,6 +312,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state + @trace + @tag_args def _get_state_for_groups_using_cache( self, groups: Iterable[int], @@ -403,6 +412,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): fetched_keys=non_member_types, ) + @trace + @tag_args async def store_state_deltas_for_batched( self, events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], @@ -520,6 +531,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): prev_group, ) + @trace + @tag_args async def store_state_group( self, event_id: str, @@ -772,6 +785,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ((sg,) for sg in state_groups_to_delete), ) + @trace + @tag_args async def get_previous_state_groups( self, state_groups: Iterable[int] ) -> Dict[int, int]: -- cgit 1.5.1 From adae1cfc8ce7c9a67aa8e1f51c393222365cae36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:37:50 +0100 Subject: Bump types-setuptools from 67.7.0.2 to 67.8.0.0 (#15639) * Bump types-setuptools from 67.7.0.2 to 67.8.0.0 Bumps [types-setuptools](https://github.com/python/typeshed) from 67.7.0.2 to 67.8.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15639.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15639.misc diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc new file mode 100644 index 0000000000..92230e206f --- /dev/null +++ b/changelog.d/15639.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.7.0.2 to 67.8.0.0. diff --git a/poetry.lock b/poetry.lock index 48a752986d..40af64a00d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3124,14 +3124,14 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.7.0.2" +version = "67.8.0.0" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.7.0.2.tar.gz", hash = "sha256:155789e85e79d5682b0d341919d4beb6140408ae52bac922af25b54e36ab25c0"}, - {file = "types_setuptools-67.7.0.2-py3-none-any.whl", hash = "sha256:bd30f6dbe9b83f0a7e6e3eab6d2df748aa4f55700d54e9f077d3aa30cc019445"}, + {file = "types-setuptools-67.8.0.0.tar.gz", hash = "sha256:95c9ed61871d6c0e258433373a4e1753c0a7c3627a46f4d4058c7b5a08ab844f"}, + {file = "types_setuptools-67.8.0.0-py3-none-any.whl", hash = "sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff"}, ] [[package]] -- cgit 1.5.1 From 8516001566362d4659c2ab498f83c90bd547106c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:38:01 +0100 Subject: Bump types-pillow from 9.5.0.2 to 9.5.0.4 (#15640) * Bump types-pillow from 9.5.0.2 to 9.5.0.4 Bumps [types-pillow](https://github.com/python/typeshed) from 9.5.0.2 to 9.5.0.4. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15640.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15640.misc diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc new file mode 100644 index 0000000000..4c2a3dbc52 --- /dev/null +++ b/changelog.d/15640.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.5.0.2 to 9.5.0.4. 
diff --git a/poetry.lock b/poetry.lock index 40af64a00d..56a89fe5b5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3058,14 +3058,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.5.0.2" +version = "9.5.0.4" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.5.0.2.tar.gz", hash = "sha256:b3f9f621f259566c19c1deca21901017c8b1e3e200ed2e49e0a2d83c0a5175db"}, - {file = "types_Pillow-9.5.0.2-py3-none-any.whl", hash = "sha256:58fdebd0ffa2353ecccdd622adde23bce89da5c0c8b96c34f2d1eca7b7e42d0e"}, + {file = "types-Pillow-9.5.0.4.tar.gz", hash = "sha256:f1b6af47abd151847ee25911ffeba784899bc7dc7f9eba8ca6a5aac522b012ef"}, + {file = "types_Pillow-9.5.0.4-py3-none-any.whl", hash = "sha256:69427d9fa4320ff6e30f00fb9c0dd71185dc0a16de4757774220104759483466"}, ] [[package]] -- cgit 1.5.1 From 875015d512a80c2c72379340ead796fc9ca1c189 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:38:08 +0100 Subject: Bump sphinx from 6.1.3 to 6.2.1 (#15641) * Bump sphinx from 6.1.3 to 6.2.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 6.1.3 to 6.2.1. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/master/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v6.1.3...v6.2.1) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15641.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15641.misc diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc new file mode 100644 index 0000000000..a85d85c58e --- /dev/null +++ b/changelog.d/15641.misc @@ -0,0 +1 @@ +Bump sphinx from 6.1.3 to 6.2.1. 
diff --git a/poetry.lock b/poetry.lock index 56a89fe5b5..4c2f554afc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2565,21 +2565,21 @@ files = [ [[package]] name = "sphinx" -version = "6.1.3" +version = "6.2.1" description = "Python documentation generator" category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "Sphinx-6.1.3.tar.gz", hash = "sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2"}, - {file = "sphinx-6.1.3-py3-none-any.whl", hash = "sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc"}, + {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"}, + {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"}, ] [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=2.9" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18,<0.20" +docutils = ">=0.18.1,<0.20" imagesize = ">=1.3" importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} Jinja2 = ">=3.0" @@ -2597,7 +2597,7 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] -test = ["cython", "html5lib", "pytest (>=4.6)"] +test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] [[package]] name = "sphinx-autodoc2" -- cgit 1.5.1 From a47b2065f066396f41a306076282e86750d06728 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 12:12:59 +0100 Subject: Bump furo from 2023.3.27 to 2023.5.20 (#15642) * Bump furo from 2023.3.27 to 2023.5.20 Bumps [furo](https://github.com/pradyunsg/furo) from 2023.3.27 to 2023.5.20. - [Release notes](https://github.com/pradyunsg/furo/releases) - [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md) - [Commits](https://github.com/pradyunsg/furo/compare/2023.03.27...2023.05.20) --- updated-dependencies: - dependency-name: furo dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15642.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15642.misc diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc new file mode 100644 index 0000000000..5d6125140d --- /dev/null +++ b/changelog.d/15642.misc @@ -0,0 +1 @@ +Bump furo from 2023.3.27 to 2023.5.20. diff --git a/poetry.lock b/poetry.lock index 4c2f554afc..b756406714 100644 --- a/poetry.lock +++ b/poetry.lock @@ -580,20 +580,20 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.3.27" +version = "2023.5.20" description = "A clean customisable Sphinx documentation theme." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "furo-2023.3.27-py3-none-any.whl", hash = "sha256:4ab2be254a2d5e52792d0ca793a12c35582dd09897228a6dd47885dabd5c9521"}, - {file = "furo-2023.3.27.tar.gz", hash = "sha256:b99e7867a5cc833b2b34d7230631dd6558c7a29f93071fdbb5709634bb33c5a5"}, + {file = "furo-2023.5.20-py3-none-any.whl", hash = "sha256:594a8436ddfe0c071f3a9e9a209c314a219d8341f3f1af33fdf7c69544fab9e6"}, + {file = "furo-2023.5.20.tar.gz", hash = "sha256:40e09fa17c6f4b22419d122e933089226dcdb59747b5b6c79363089827dea16f"}, ] [package.dependencies] beautifulsoup4 = "*" pygments = ">=2.7" -sphinx = ">=5.0,<7.0" +sphinx = ">=6.0,<8.0" sphinx-basic-ng = "*" [[package]] -- cgit 1.5.1 From cc53c96bf813045c08593d4996deb57915fbb0e5 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 22 May 2023 13:25:39 +0100 Subject: Limit the size of the `HomeServerConfig` cache in trial test runs (#15646) ...to try to control memory usage. `HomeServerConfig`s hold on to many Jinja2 objects, which come out to over 0.5 MiB per config. Over the course of a full test run, the cache grows to ~360 entries. Limit it to 8 entries. Part of #15622. Signed-off-by: Sean Quah --- changelog.d/15646.misc | 1 + tests/unittest.py | 23 +++++++---------------- 2 files changed, 8 insertions(+), 16 deletions(-) create mode 100644 changelog.d/15646.misc diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc new file mode 100644 index 0000000000..872afe30b8 --- /dev/null +++ b/changelog.d/15646.misc @@ -0,0 +1 @@ +Limit the size of the `HomeServerConfig` cache in trial test runs. diff --git a/tests/unittest.py b/tests/unittest.py index 623c5a75a2..c73195b32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import functools import gc import hashlib import hmac @@ -150,7 +151,11 @@ def deepcopy_config(config: _TConfig) -> _TConfig: return new_config -_make_homeserver_config_obj_cache: Dict[str, Union[RootConfig, Config]] = {} +@functools.lru_cache(maxsize=8) +def _parse_config_dict(config: str) -> RootConfig: + config_obj = HomeServerConfig() + config_obj.parse_config_dict(json.loads(config), "", "") + return config_obj def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: @@ -164,21 +169,7 @@ def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed, to avoid validating the whole configuration every time. 
""" - cache_key = json.dumps(config) - - if cache_key in _make_homeserver_config_obj_cache: - # Cache hit: reuse the existing instance - config_obj = _make_homeserver_config_obj_cache[cache_key] - else: - # Cache miss; create the actual instance - config_obj = HomeServerConfig() - config_obj.parse_config_dict(config, "", "") - - # Add to the cache - _make_homeserver_config_obj_cache[cache_key] = config_obj - - assert isinstance(config_obj, RootConfig) - + config_obj = _parse_config_dict(json.dumps(config, sort_keys=True)) return deepcopy_config(config_obj) -- cgit 1.5.1 From 201597fc86b2fb88f26b529a7cc5f077efe3dd9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 15:39:19 +0100 Subject: Bump pygithub from 1.58.1 to 1.58.2 (#15643) * Bump pygithub from 1.58.1 to 1.58.2 Bumps [pygithub](https://github.com/pygithub/pygithub) from 1.58.1 to 1.58.2. - [Release notes](https://github.com/pygithub/pygithub/releases) - [Changelog](https://github.com/PyGithub/PyGithub/blob/v1.58.2/doc/changes.rst) - [Commits](https://github.com/pygithub/pygithub/compare/v1.58.1...v1.58.2) --- updated-dependencies: - dependency-name: pygithub dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15643.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15643.misc diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc new file mode 100644 index 0000000000..5bd2e74071 --- /dev/null +++ b/changelog.d/15643.misc @@ -0,0 +1 @@ +Bump pygithub from 1.58.1 to 1.58.2. diff --git a/poetry.lock b/poetry.lock index b756406714..6f0374bb3f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1940,14 +1940,14 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.58.1" +version = "1.58.2" description = "Use the full Github API v3" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.58.1-py3-none-any.whl", hash = "sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"}, - {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"}, + {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"}, + {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"}, ] [package.dependencies] -- cgit 1.5.1 From c5d1e6d414fa7b4074bc72ca3719c1341a1c5379 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 22 May 2023 11:31:22 -0400 Subject: Properly parse event_fields in filters (#15607) The event_fields property in filters should use the proper escape rules, namely backslashes can be escaped with an additional backslash. This adds tests (adapted from matrix-js-sdk) and implements the logic to properly split the event_fields strings. 
--- changelog.d/15607.bugfix | 1 + synapse/api/filtering.py | 15 +--------- synapse/events/utils.py | 72 ++++++++++++++++++++++++++++++++++++--------- tests/api/test_filtering.py | 6 ---- tests/events/test_utils.py | 39 ++++++++++++++++++++++++ 5 files changed, 99 insertions(+), 34 deletions(-) create mode 100644 changelog.d/15607.bugfix diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix new file mode 100644 index 0000000000..a2767adbe2 --- /dev/null +++ b/changelog.d/15607.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where filters with multiple backslashes were rejected. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index de7c56bc0f..82aeef8d19 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -128,20 +128,7 @@ USER_FILTER_SCHEMA = { "account_data": {"$ref": "#/definitions/filter"}, "room": {"$ref": "#/definitions/room_filter"}, "event_format": {"type": "string", "enum": ["client", "federation"]}, - "event_fields": { - "type": "array", - "items": { - "type": "string", - # Don't allow '\\' in event field filters. This makes matching - # events a lot easier as we can then use a negative lookbehind - # assertion to split '\.' If we allowed \\ then it would - # incorrectly split '\\.' See synapse.events.utils.serialize_event - # - # Note that because this is a regular expression, we have to escape - # each backslash in the pattern. - "pattern": r"^((?!\\\\).)*$", - }, - }, + "event_fields": {"type": "array", "items": {"type": "string"}}, }, "additionalProperties": True, # Allow new fields for forward compatibility } diff --git a/synapse/events/utils.py b/synapse/events/utils.py index e6d040176b..e7b7b78b84 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -22,6 +22,7 @@ from typing import ( Iterable, List, Mapping, + Match, MutableMapping, Optional, Union, @@ -46,12 +47,10 @@ if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations -# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' -# (? None: sub_out_dict[key_to_move] = sub_dict[key_to_move] +def _escape_slash(m: Match[str]) -> str: + """ + Replacement function; replace a backslash-backslash or backslash-dot with the + second character. Leaves any other string alone. + """ + if m.group(1) in ("\\", "."): + return m.group(1) + return m.group(0) + + +def _split_field(field: str) -> List[str]: + """ + Splits strings on unescaped dots and removes escaping. + + Args: + field: A string representing a path to a field. + + Returns: + A list of nested fields to traverse. + """ + + # Convert the field and remove escaping: + # + # 1. "content.body.thing\.with\.dots" + # 2. ["content", "body", "thing\.with\.dots"] + # 3. ["content", "body", "thing.with.dots"] + + # Find all dots (and their preceding backslashes). If the dot is unescaped + # then emit a new field part. + result = [] + prev_start = 0 + for match in SPLIT_FIELD_REGEX.finditer(field): + # If the match is an *even* number of characters than the dot was escaped. + if len(match.group()) % 2 == 0: + continue + + # Add a new part (up to the dot, exclusive) after escaping. + result.append( + ESCAPE_SEQUENCE_PATTERN.sub( + _escape_slash, field[prev_start : match.end() - 1] + ) + ) + prev_start = match.end() + + # Add any part of the field after the last unescaped dot. (Note that if the + # character is a dot this correctly adds a blank string.) 
+ result.append(re.sub(r"\\(.)", _escape_slash, field[prev_start:])) + + return result + + def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: """Return a new dict with only the fields in 'dictionary' which are present in 'fields'. @@ -260,7 +310,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: If there are no event fields specified then all fields are included. The entries may include '.' characters to indicate sub-fields. So ['content.body'] will include the 'body' field of the 'content' object. - A literal '.' character in a field name may be escaped using a '\'. + A literal '.' or '\' character in a field name may be escaped using a '\'. Args: dictionary: The dictionary to read from. @@ -275,13 +325,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: # for each field, convert it: # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]] - split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields] - - # for each element of the output array of arrays: - # remove escaping so we can use the right key names. - split_fields[:] = [ - [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields - ] + split_fields = [_split_field(f) for f in fields] output: JsonDict = {} for field_array in split_fields: diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 222449baac..aa6af5ad7b 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -48,8 +48,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): invalid_filters: List[JsonDict] = [ # `account_data` must be a dictionary {"account_data": "Hello World"}, - # `event_fields` entries must not contain backslashes - {"event_fields": [r"\\foo"]}, # `event_format` must be "client" or "federation" {"event_format": "other"}, # `not_rooms` must contain valid room IDs @@ -114,10 +112,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): "event_format": "client", "event_fields": ["type", "content", "sender"], }, - # a single backslash should be permitted (though it is debatable whether - # it should be permitted before anything other than `.`, and what that - # actually means) - # # (note that event_fields is implemented in # synapse.events.utils.serialize_event, and so whether this actually works # is tested elsewhere. We just want to check that it is allowed through the diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index e40eac2eb0..c9a610db9a 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -16,6 +16,7 @@ import unittest as stdlib_unittest from typing import Any, List, Mapping, Optional import attr +from parameterized import parameterized from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions @@ -23,6 +24,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import ( PowerLevelsContent, SerializeEventConfig, + _split_field, copy_and_fixup_power_levels_contents, maybe_upsert_event_field, prune_event, @@ -794,3 +796,40 @@ class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase): def test_invalid_nesting_raises_type_error(self) -> None: with self.assertRaises(TypeError): copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) # type: ignore[dict-item] + + +class SplitFieldTestCase(stdlib_unittest.TestCase): + @parameterized.expand( + [ + # A field with no dots. + ["m", ["m"]], + # Simple dotted fields. 
+ ["m.foo", ["m", "foo"]], + ["m.foo.bar", ["m", "foo", "bar"]], + # Backslash is used as an escape character. + [r"m\.foo", ["m.foo"]], + [r"m\\.foo", ["m\\", "foo"]], + [r"m\\\.foo", [r"m\.foo"]], + [r"m\\\\.foo", ["m\\\\", "foo"]], + [r"m\foo", [r"m\foo"]], + [r"m\\foo", [r"m\foo"]], + [r"m\\\foo", [r"m\\foo"]], + [r"m\\\\foo", [r"m\\foo"]], + # Ensure that escapes at the end don't cause issues. + ["m.foo\\", ["m", "foo\\"]], + ["m.foo\\", ["m", "foo\\"]], + [r"m.foo\.", ["m", "foo."]], + [r"m.foo\\.", ["m", "foo\\", ""]], + [r"m.foo\\\.", ["m", r"foo\."]], + # Empty parts (corresponding to properties which are an empty string) are allowed. + [".m", ["", "m"]], + ["..m", ["", "", "m"]], + ["m.", ["m", ""]], + ["m..", ["m", "", ""]], + ["m..foo", ["m", "", "foo"]], + # Invalid escape sequences. + [r"\m", [r"\m"]], + ] + ) + def test_split_field(self, input: str, expected: str) -> None: + self.assertEqual(_split_field(input), expected) -- cgit 1.5.1 From 737f7ddf5873a28d4334dc7f6b25edbaaaf934c7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 22 May 2023 18:58:58 +0100 Subject: Remove outdated comment in log config (#15648) --- changelog.d/15648.doc | 1 + docs/sample_log_config.yaml | 4 +--- synapse/config/logger.py | 4 +--- 3 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15648.doc diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc new file mode 100644 index 0000000000..70f65ebbff --- /dev/null +++ b/changelog.d/15648.doc @@ -0,0 +1 @@ +Remove outdated comment from the generated and sample homeserver log configs. \ No newline at end of file diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 6339160d00..ae0318122e 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -68,9 +68,7 @@ root: # Write logs to the `buffer` handler, which will buffer them together in memory, # then write them to a file. # - # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuration for the `twisted` logger above, in - # this case.) + # Replace "buffer" with "console" to log to stderr instead. # handlers: [buffer] diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 56db875b25..1e080133dc 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -117,9 +117,7 @@ root: # Write logs to the `buffer` handler, which will buffer them together in memory, # then write them to a file. # - # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuration for the `twisted` logger above, in - # this case.) + # Replace "buffer" with "console" to log to stderr instead. # handlers: [buffer] -- cgit 1.5.1 From 1903c7e5edccc86f6d28aed33dc2995b43d941b7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 May 2023 13:49:01 -0500 Subject: Remove duplicate timestamp from test logs (`_trial_temp/test.log`) (#15636) Fix https://github.com/matrix-org/synapse/issues/15618 ### Before ``` 2023-05-17 22:51:36-0500 [-] 2023-05-17 22:51:36,889 - synapse.server - 338 - INFO - sentinel - Finished setting up. ``` ### After ``` 2023-05-19 18:16:20-0500 [-] synapse.server - 338 - INFO - sentinel - Finished setting up. 
``` ### Dev notes The `Twisted.Logger` controls the `2023-05-19 18:16:20-0500 [-]` prefix, see : [`twisted/twisted` -> `src/twisted/logger/_format.py#L362-L374`](https://github.com/twisted/twisted/blob/34b161e66bc7c9f9efbb95e82c770a863933e498/src/twisted/logger/_format.py#L362-L374) And we delegate our logs to the Twisted Logger for the tests which puts it in `_trial_temp/test.log` --- changelog.d/15636.misc | 1 + tests/test_utils/logging_setup.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15636.misc diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc new file mode 100644 index 0000000000..82329c5e43 --- /dev/null +++ b/changelog.d/15636.misc @@ -0,0 +1 @@ +Remove duplicate timestamp from test logs (`_trial_temp/test.log`). diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index b522163a34..c37f205ed0 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -40,10 +40,9 @@ def setup_logging() -> None: """ root_logger = logging.getLogger() - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - " - "%(levelname)s - %(request)s - %(message)s" - ) + # We exclude `%(asctime)s` from this format because the Twisted logger adds its own + # timestamp + log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s" handler = ToTwistedHandler() formatter = logging.Formatter(log_format) -- cgit 1.5.1 From 11ff4884e70457431ec2f816001f3772ac68a522 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 10:57:39 +0100 Subject: 1.84.0 --- CHANGES.md | 9 +++++++++ changelog.d/15599.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15599.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4877ba9d44..ca594a9532 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.84.0 (2023-05-23) +=========================== + +Bugfixes +-------- + +- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) + + Synapse 1.84.0rc1 (2023-05-16) ============================== diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix deleted file mode 100644 index b58af8ad55..0000000000 --- a/changelog.d/15599.bugfix +++ /dev/null @@ -1 +0,0 @@ -Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/debian/changelog b/debian/changelog index ad163add2b..51935e03b6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.84.0) stable; urgency=medium + + * New Synapse release 1.84.0. + + -- Synapse Packaging team Tue, 23 May 2023 10:57:22 +0100 + matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium * New Synapse release 1.84.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index 86e1537a6d..9c77f9294a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.0rc1" +version = "1.84.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From ea6fcda98d56dd8f34712de5691e77c99fc5c0ae Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 11:03:06 +0100 Subject: Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ca594a9532..dc564d5479 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.84.0 (2023-05-23) Bugfixes -------- -- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) +- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) Synapse 1.84.0rc1 (2023-05-16) -- cgit 1.5.1 From 5cae9158e67babe0553bc356802495a068222685 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 11:13:38 +0100 Subject: Tweak changelog and upgrade notes --- CHANGES.md | 9 ++++++++- docs/upgrade.md | 23 +++++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dc564d5479..e9397158f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.84.0 (2023-05-23) =========================== +The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information. + Bugfixes -------- @@ -32,6 +34,12 @@ Bugfixes - Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571)) +Deprecations and Removals +------------------------- + +- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491)) + + Updates to the Docker image --------------------------- @@ -51,7 +59,6 @@ Internal Changes - Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025)) - Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470)) -- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491)) - Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509)) - Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). 
([\#15522](https://github.com/matrix-org/synapse/issues/15522)) - Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527)) diff --git a/docs/upgrade.md b/docs/upgrade.md index 0625de8afb..af999dd91f 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -92,15 +92,22 @@ process, for example: ## Deprecation of `worker_replication_*` configuration settings -When using workers, +When using workers, + * `worker_replication_host` * `worker_replication_http_port` * `worker_replication_http_tls` -can now be removed from individual worker YAML configuration ***if*** you add the main process to the `instance_map` in the shared YAML configuration, -using the name `main`. +should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map` +in the shared YAML configuration, using the name `main`. + +The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0. + + +### Example change + +#### Before: -### Before: Shared YAML ```yaml instance_map: @@ -109,6 +116,7 @@ instance_map: port: 5678 tls: false ``` + Worker YAML ```yaml worker_app: synapse.app.generic_worker @@ -130,7 +138,10 @@ worker_listeners: worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml ``` -### After: + + +#### After: + Shared YAML ```yaml instance_map: @@ -143,6 +154,7 @@ instance_map: port: 5678 tls: false ``` + Worker YAML ```yaml worker_app: synapse.app.generic_worker @@ -165,7 +177,6 @@ Notes: * `tls` is optional but mirrors the functionality of `worker_replication_http_tls` - # Upgrading to v1.81.0 ## Application service path & authentication deprecations -- cgit 1.5.1 From 03042e435b23c82a1c911e7ca4011a333e3ecb71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 07:28:51 -0400 Subject: Bump requests from 2.28.2 to 2.31.0 (#15651) --- changelog.d/15651.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15651.misc diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc new file mode 100644 index 0000000000..4d7c0248b2 --- /dev/null +++ b/changelog.d/15651.misc @@ -0,0 +1 @@ +Bump requests from 2.28.2 to 2.31.0. diff --git a/poetry.lock b/poetry.lock index 6f0374bb3f..3f8bf7c304 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2251,21 +2251,21 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." 
category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -- cgit 1.5.1 From 1df0221bda65cc90ee3a15d210b87e8065bc865f Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 23 May 2023 08:05:30 -0500 Subject: Use a custom scheme & the worker name for replication requests. (#15578) All the information needed is already in the `instance_map`, so use that instead of passing the hostname / IP & port manually for each replication request. This consolidates logic for future improvements of using e.g. UNIX sockets for workers. --- changelog.d/15578.misc | 1 + synapse/http/client.py | 1 + synapse/http/replicationagent.py | 34 +++++++++++++++++++++++++++------- synapse/replication/http/_base.py | 18 ++++++------------ 4 files changed, 35 insertions(+), 19 deletions(-) create mode 100644 changelog.d/15578.misc diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc new file mode 100644 index 0000000000..a54422239b --- /dev/null +++ b/changelog.d/15578.misc @@ -0,0 +1 @@ +Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. diff --git a/synapse/http/client.py b/synapse/http/client.py index f1ab7a8bc9..09ea93e10d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -835,6 +835,7 @@ class ReplicationClient(BaseHttpClient): self.agent: IAgent = ReplicationAgent( hs.get_reactor(), + hs.config.worker.instance_map, contextFactory=hs.get_http_client_context_factory(), pool=pool, ) diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index 5ecd08be0f..800f21873d 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import Optional +from typing import Dict, Optional from zope.interface import implementer @@ -32,6 +32,7 @@ from twisted.web.iweb import ( IResponse, ) +from synapse.config.workers import InstanceLocationConfig from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -44,9 +45,11 @@ class ReplicationEndpointFactory: def __init__( self, reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], context_factory: IPolicyForHTTPS, ) -> None: self.reactor = reactor + self.instance_map = instance_map self.context_factory = context_factory def endpointForURI(self, uri: URI) -> IStreamClientEndpoint: @@ -58,15 +61,29 @@ class ReplicationEndpointFactory: Returns: The correct client endpoint object """ - if uri.scheme in (b"http", b"https"): - endpoint = HostnameEndpoint(self.reactor, uri.host, uri.port) - if uri.scheme == b"https": + # The given URI has a special scheme and includes the worker name. The + # actual connection details are pulled from the instance map. 
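+        # For example, "synapse-replication://worker1/_synapse/replication/..."
+        # has a netloc of "worker1", the worker's key in the instance map.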
+ worker_name = uri.netloc.decode("utf-8") + scheme = self.instance_map[worker_name].scheme() + + if scheme in ("http", "https"): + endpoint = HostnameEndpoint( + self.reactor, + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ) + if scheme == "https": endpoint = wrapClientTLS( - self.context_factory.creatorForNetloc(uri.host, uri.port), endpoint + # The 'port' argument below isn't actually used by the function + self.context_factory.creatorForNetloc( + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ), + endpoint, ) return endpoint else: - raise SchemeNotSupported(f"Unsupported scheme: {uri.scheme!r}") + raise SchemeNotSupported(f"Unsupported scheme: {scheme}") @implementer(IAgent) @@ -80,6 +97,7 @@ class ReplicationAgent(_AgentBase): def __init__( self, reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], contextFactory: IPolicyForHTTPS, connectTimeout: Optional[float] = None, bindAddress: Optional[bytes] = None, @@ -102,7 +120,9 @@ class ReplicationAgent(_AgentBase): created. """ _AgentBase.__init__(self, reactor, pool) - endpoint_factory = ReplicationEndpointFactory(reactor, contextFactory) + endpoint_factory = ReplicationEndpointFactory( + reactor, instance_map, contextFactory + ) self._endpointFactory = endpoint_factory def request( diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index dc7820f963..63cf24a14d 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -219,11 +219,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") - if instance_name in instance_map: - host = instance_map[instance_name].host - port = instance_map[instance_name].port - tls = instance_map[instance_name].tls - else: + if instance_name not in instance_map: raise Exception( "Instance %r not in 'instance_map' config" % (instance_name,) ) @@ -271,13 +267,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): "Unknown METHOD on %s replication endpoint" % (cls.NAME,) ) - # Here the protocol is hard coded to be http by default or https in case the replication - # port is set to have tls true. - scheme = "https" if tls else "http" - uri = "%s://%s:%s/_synapse/replication/%s/%s" % ( - scheme, - host, - port, + # Hard code a special scheme to show this only used for replication. The + # instance_name will be passed into the ReplicationEndpointFactory to + # determine connection details from the instance_map. + uri = "synapse-replication://%s/_synapse/replication/%s/%s" % ( + instance_name, cls.NAME, "/".join(url_args), ) -- cgit 1.5.1 From 7c9b91790c013d11ca88a9d01e0054939eda8523 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 23 May 2023 10:35:43 -0400 Subject: Consolidate logic to check for deactivated users. (#15634) This moves the deactivated user check to the method which all login types call. Additionally updates the application service tests to be more realistic by removing invalid tests and fixing server names. 
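The consolidated check can be read as a hypothetical standalone helper (the patch below actually inlines this logic in `LoginRestServlet._complete_login`; the helper name and signature here are illustrative only):

```python
from synapse.api.errors import UserDeactivatedError
from synapse.storage.databases.main import DataStore


async def assert_not_deactivated(
    store: DataStore, user_id: str, should_check_deactivated: bool = True
) -> None:
    """Reject logins for deactivated accounts (sketch of the new guard).

    `should_check_deactivated` is False only when authenticating as an
    appservice's configured sender, which has no row in the users table.
    """
    if should_check_deactivated:
        if await store.get_user_deactivated_status(user_id):
            raise UserDeactivatedError("This account has been deactivated")
```

Password, JWT and SSO token logins all funnel through `_complete_login`, so each flow no longer needs its own copy of this check.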
--- changelog.d/15634.bugfix | 1 + docs/modules/password_auth_provider_callbacks.md | 3 ++ synapse/appservice/__init__.py | 3 +- synapse/handlers/auth.py | 14 ++---- synapse/handlers/jwt.py | 19 ++------ synapse/rest/client/login.py | 23 +++++++-- tests/handlers/test_password_providers.py | 59 +++++++++--------------- 7 files changed, 55 insertions(+), 67 deletions(-) create mode 100644 changelog.d/15634.bugfix diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix new file mode 100644 index 0000000000..ef39e8a689 --- /dev/null +++ b/changelog.d/15634.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where deactivated users were able to login in uncommon situations. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index 8275f7ebdc..d66ac7df31 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -46,6 +46,9 @@ instead. If the authentication is unsuccessful, the module must return `None`. +Note that the user is not automatically registered, the `register_user(..)` method of +the [module API](writing_a_module.html) can be used to lazily create users. + If multiple modules register an auth checker for the same login type but with different fields, Synapse will refuse to start. diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 35c330a3c4..2260a8f589 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -86,6 +86,7 @@ class ApplicationService: url.rstrip("/") if isinstance(url, str) else None ) # url must not end with a slash self.hs_token = hs_token + # The full Matrix ID for this application service's sender. self.sender = sender self.namespaces = self._check_namespaces(namespaces) self.id = id @@ -212,7 +213,7 @@ class ApplicationService: True if the application service is interested in the user, False if not. """ return ( - # User is the appservice's sender_localpart user + # User is the appservice's configured sender_localpart user user_id == self.sender # User is in the appservice's user namespace or self.is_user_in_namespace(user_id) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 59e340974d..d001f2fb2f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -52,7 +52,6 @@ from synapse.api.errors import ( NotFoundError, StoreError, SynapseError, - UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import ( @@ -1419,12 +1418,6 @@ class AuthHandler: return None (user_id, password_hash) = lookupres - # If the password hash is None, the account has likely been deactivated - if not password_hash: - deactivated = await self.store.get_user_deactivated_status(user_id) - if deactivated: - raise UserDeactivatedError("This account has been deactivated") - result = await self.validate_hash(password, password_hash) if not result: logger.warning("Failed password login for user %s", user_id) @@ -1749,8 +1742,11 @@ class AuthHandler: registered. auth_provider_session_id: The session ID from the SSO IdP received during login. """ - # If the account has been deactivated, do not proceed with the login - # flow. + # If the account has been deactivated, do not proceed with the login. + # + # This gets checked again when the token is submitted but this lets us + # provide an HTML error page to the user (instead of issuing a token and + # having it error later). 
deactivated = await self.store.get_user_deactivated_status(registered_user_id) if deactivated: respond_with_html(request, 403, self._sso_account_deactivated_template) diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index 5fddc0e315..740bf9b3c4 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -16,7 +16,7 @@ from typing import TYPE_CHECKING from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError -from synapse.api.errors import Codes, LoginError, StoreError, UserDeactivatedError +from synapse.api.errors import Codes, LoginError from synapse.types import JsonDict, UserID if TYPE_CHECKING: @@ -26,7 +26,6 @@ if TYPE_CHECKING: class JwtHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self._main_store = hs.get_datastores().main self.jwt_secret = hs.config.jwt.jwt_secret self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim @@ -34,7 +33,7 @@ class JwtHandler: self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - async def validate_login(self, login_submission: JsonDict) -> str: + def validate_login(self, login_submission: JsonDict) -> str: """ Authenticates the user for the /login API @@ -103,16 +102,4 @@ class JwtHandler: if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) - user_id = UserID(user, self.hs.hostname).to_string() - - # If the account has been deactivated, do not proceed with the login - # flow. - try: - deactivated = await self._main_store.get_user_deactivated_status(user_id) - except StoreError: - # JWT lazily creates users, so they may not exist in the database yet. - deactivated = False - if deactivated: - raise UserDeactivatedError("This account has been deactivated") - - return user_id + return UserID(user, self.hs.hostname).to_string() diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index afdbf821b5..6ca61ffbd0 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( LoginError, NotApprovedError, SynapseError, + UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX @@ -84,6 +85,7 @@ class LoginRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs + self._main_store = hs.get_datastores().main # JWT configuration variables. self.jwt_enabled = hs.config.jwt.jwt_enabled @@ -112,13 +114,13 @@ class LoginRestServlet(RestServlet): self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( - store=hs.get_datastores().main, + store=self._main_store, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second, burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count, ) self._account_ratelimiter = Ratelimiter( - store=hs.get_datastores().main, + store=self._main_store, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second, burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count, @@ -280,6 +282,9 @@ class LoginRestServlet(RestServlet): login_submission, ratelimit=appservice.is_rate_limited(), should_issue_refresh_token=should_issue_refresh_token, + # The user represented by an appservice's configured sender_localpart + # is not actually created in Synapse. 
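+            # (A deactivation lookup for that sender would find no user row at all.)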
+ should_check_deactivated=qualified_user_id != appservice.sender, ) async def _do_other_login( @@ -326,6 +331,7 @@ class LoginRestServlet(RestServlet): auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, auth_provider_session_id: Optional[str] = None, + should_check_deactivated: bool = True, ) -> LoginResponse: """Called when we've successfully authed the user and now need to actually login them in (e.g. create devices). This gets called on @@ -345,6 +351,11 @@ class LoginRestServlet(RestServlet): should_issue_refresh_token: True if this login should issue a refresh token alongside the access token. auth_provider_session_id: The session ID got during login from the SSO IdP. + should_check_deactivated: True if the user should be checked for + deactivation status before logging in. + + This exists purely for appservice's configured sender_localpart + which doesn't have an associated user in the database. Returns: Dictionary of account information after successful login. @@ -364,6 +375,12 @@ class LoginRestServlet(RestServlet): ) user_id = canonical_uid + # If the account has been deactivated, do not proceed with the login. + if should_check_deactivated: + deactivated = await self._main_store.get_user_deactivated_status(user_id) + if deactivated: + raise UserDeactivatedError("This account has been deactivated") + device_id = login_submission.get("device_id") # If device_id is present, check that device_id is not longer than a reasonable 512 characters @@ -458,7 +475,7 @@ class LoginRestServlet(RestServlet): Returns: The body of the JSON response. """ - user_id = await self.hs.get_jwt_handler().validate_login(login_submission) + user_id = self.hs.get_jwt_handler().validate_login(login_submission) return await self._complete_login( user_id, login_submission, diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index aa91bc0a3d..394006f5f3 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -18,13 +18,17 @@ from http import HTTPStatus from typing import Any, Dict, List, Optional, Type, Union from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.handlers.account import AccountHandler from synapse.module_api import ModuleApi from synapse.rest.client import account, devices, login, logout, register +from synapse.server import HomeServer from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -162,10 +166,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): CALLBACK_USERNAME = "get_username_for_registration" CALLBACK_DISPLAYNAME = "get_displayname_for_registration" - def setUp(self) -> None: + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: # we use a global mock device, so make sure we are starting with a clean slate mock_password_provider.reset_mock() - super().setUp() + + # The mock password provider doesn't register the users, so ensure they + # are registered first. 
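+        # The passwords used here are arbitrary: in these tests authentication
+        # is decided by the mocked password provider, not by Synapse.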
+ self.register_user("u", "not-the-tested-password") + self.register_user("user", "not-the-tested-password") @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_progiver_login_legacy(self) -> None: @@ -185,22 +195,12 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # login with mxid should work too - channel = self._send_password_login("@u:bz", "p") + channel = self._send_password_login("@u:test", "p") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@u:bz", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") + self.assertEqual("@u:test", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") mock_password_provider.reset_mock() - # try a weird username / pass. Honestly it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with( - "@ USER🙂NAME :test", " pASS😢word " - ) - @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_provider_ui_auth_legacy(self) -> None: self.password_only_auth_provider_ui_auth_test_body() @@ -208,10 +208,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): def password_only_auth_provider_ui_auth_test_body(self) -> None: """UI Auth should delegate correctly to the password provider""" - # create the user, otherwise access doesn't work - module_api = self.hs.get_module_api() - self.get_success(module_api.register_user("u")) - # log in twice, to get two devices mock_password_provider.check_password.return_value = make_awaitable(True) tok1 = self.login("u", "p") @@ -401,29 +397,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) mock_password_provider.reset_mock() - # try a weird username. Again, it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - mock_password_provider.check_auth.return_value = make_awaitable( - ("@ MALFORMED! :bz", None) - ) - channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ MALFORMED! 
:bz", channel.json_body["user_id"]) - mock_password_provider.check_auth.assert_called_once_with( - " USER🙂NAME ", "test.login_type", {"test_field": " abc "} - ) - @override_config(legacy_providers_config(LegacyCustomAuthProvider)) def test_custom_auth_provider_ui_auth_legacy(self) -> None: self.custom_auth_provider_ui_auth_test_body() @@ -465,7 +448,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # right params, but authing as the wrong user mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) body["auth"]["test_field"] = "foo" channel = self._delete_device(tok1, "dev2", body) @@ -498,11 +481,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): callback = Mock(return_value=make_awaitable(None)) mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", callback) + ("@user:test", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) @@ -512,7 +495,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): call_args, call_kwargs = callback.call_args # should be one positional arg self.assertEqual(len(call_args), 1) - self.assertEqual(call_args[0]["user_id"], "@user:bz") + self.assertEqual(call_args[0]["user_id"], "@user:test") for p in ["user_id", "access_token", "device_id", "home_server"]: self.assertIn(p, call_args[0]) -- cgit 1.5.1 From 379eb2d7abc8e3215cc9fd14deefb975137c9494 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 May 2023 12:26:25 -0500 Subject: Fix `@trace` not wrapping some state methods that return coroutines correctly (#15647) ``` 2023-05-21 09:30:09,288 - synapse.logging.opentracing - 940 - ERROR - POST-1 - @trace may not have wrapped StateStorageController.get_state_for_groups correctly! The function is not async but returned a coroutine ``` Tracing instrumentation for these functions originally introduced in https://github.com/matrix-org/synapse/pull/15610 --- changelog.d/15647.bugfix | 1 + synapse/storage/controllers/state.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15647.bugfix diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15647.bugfix @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 06a80869eb..7089b0a1d8 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -16,7 +16,6 @@ from typing import ( TYPE_CHECKING, AbstractSet, Any, - Awaitable, Callable, Collection, Dict, @@ -175,9 +174,9 @@ class StateStorageController: @trace @tag_args - def _get_state_groups_from_groups( + async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter - ) -> Awaitable[Dict[int, StateMap[str]]]: + ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups, filtering on types of state events. @@ -190,7 +189,9 @@ class StateStorageController: Dict of state group to state map. 
""" - return self.stores.state._get_state_groups_from_groups(groups, state_filter) + return await self.stores.state._get_state_groups_from_groups( + groups, state_filter + ) @trace @tag_args @@ -349,9 +350,9 @@ class StateStorageController: @trace @tag_args - def get_state_for_groups( + async def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None - ) -> Awaitable[Dict[int, MutableStateMap[str]]]: + ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -363,7 +364,7 @@ class StateStorageController: Returns: Dict of state group to state map. """ - return self.stores.state._get_state_for_groups( + return await self.stores.state._get_state_for_groups( groups, state_filter or StateFilter.all() ) -- cgit 1.5.1 From 1f55c04cbca6dc56085896dd980defa26ffe3b5b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 24 May 2023 08:59:31 -0400 Subject: Improve type hints for cached decorator. (#15658) The cached decorators always return a Deferred, which was not properly propagated. It was close enough when wrapping coroutines, but failed if a bare function was wrapped. --- changelog.d/15658.misc | 1 + scripts-dev/mypy_synapse_plugin.py | 34 +++++++++++- synapse/storage/databases/main/roommember.py | 2 +- synapse/util/caches/descriptors.py | 6 +- tests/appservice/test_appservice.py | 82 ++++++++++------------------ tests/storage/test_transactions.py | 11 ++-- 6 files changed, 73 insertions(+), 63 deletions(-) create mode 100644 changelog.d/15658.misc diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15658.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 2c377533c0..8058e9c993 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -18,10 +18,11 @@ can crop up, e.g the cache descriptors. from typing import Callable, Optional, Type +from mypy.erasetype import remove_instance_last_known_values from mypy.nodes import ARG_NAMED_OPT from mypy.plugin import MethodSigContext, Plugin from mypy.typeops import bind_self -from mypy.types import CallableType, NoneType, UnionType +from mypy.types import CallableType, Instance, NoneType, UnionType class SynapsePlugin(Plugin): @@ -92,10 +93,41 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.append("on_invalidate") arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg. + # Finally we ensure the return type is a Deferred. + if ( + isinstance(signature.ret_type, Instance) + and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred" + ): + # If it is already a Deferred, nothing to do. + ret_type = signature.ret_type + else: + ret_arg = None + if isinstance(signature.ret_type, Instance): + # If a coroutine, wrap the coroutine's return type in a Deferred. + if signature.ret_type.type.fullname == "typing.Coroutine": + ret_arg = signature.ret_type.args[2] + + # If an awaitable, wrap the awaitable's final value in a Deferred. + elif signature.ret_type.type.fullname == "typing.Awaitable": + ret_arg = signature.ret_type.args[0] + + # Otherwise, wrap the return value in a Deferred. + if ret_arg is None: + ret_arg = signature.ret_type + + # This should be able to use ctx.api.named_generic_type, but that doesn't seem + # to find the correct symbol for anything more than 1 module deep. 
+ # + # modules is not part of CheckerPluginInterface. The following is a combination + # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. + sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] + ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) + signature = signature.copy_modified( arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds, + ret_type=ret_type, ) return signature diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e068f27a10..ae9c201b87 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1099,7 +1099,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # `get_joined_hosts` is called with the "current" state group for the # room, and so consecutive calls will be for consecutive state groups # which point to the previous state group. - cache = await self._get_joined_hosts_cache(room_id) # type: ignore[misc] + cache = await self._get_joined_hosts_cache(room_id) # If the state group in the cache matches, we already have the data we need. if state_entry.state_group == cache.state_group: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 81df71a0c5..8514a75a1c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -220,7 +220,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.iterable = iterable self.prune_unread_entries = prune_unread_entries - def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: + def __get__( + self, obj: Optional[Any], owner: Optional[Type] + ) -> Callable[..., "defer.Deferred[Any]"]: cache: DeferredCache[CacheKey, Any] = DeferredCache( name=self.name, max_entries=self.max_entries, @@ -232,7 +234,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): get_cache_key = self.cache_key_builder @functools.wraps(self.orig) - def _wrapped(*args: Any, **kwargs: Any) -> Any: + def _wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Any]": # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index dee976356f..66753c60c4 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re -from typing import Generator +from typing import Any, Generator from unittest.mock import Mock from twisted.internet import defer @@ -49,15 +49,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -65,15 +63,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -81,17 +77,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_member_is_checked( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.event.type = "m.room.member" self.event.state_key = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -99,17 +93,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -117,25 +109,21 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_regex_alias_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_regex_alias_match(self) -> 
Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -145,10 +133,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -192,7 +178,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_alias_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -213,7 +199,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_multiple_matches( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -223,18 +209,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_interested_in_self( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_interested_in_self(self) -> Generator["defer.Deferred[Any]", object, None]: # make sure invites get through self.service.sender = "@appservice:name" self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) @@ -243,18 +225,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.state_key = self.service.sender self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_member_list_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_member_list_match(self) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. self.store.get_local_users_in_room = simple_async_mock( @@ -265,10 +243,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py index db9ee9955e..2fab84a529 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -33,15 +33,14 @@ class TransactionStoreTestCase(HomeserverTestCase): destination retries, as well as testing tht we can set and get correctly. 
""" - d = self.store.get_destination_retry_timings("example.com") - r = self.get_success(d) + r = self.get_success(self.store.get_destination_retry_timings("example.com")) self.assertIsNone(r) - d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100) - self.get_success(d) + self.get_success( + self.store.set_destination_retry_timings("example.com", 1000, 50, 100) + ) - d = self.store.get_destination_retry_timings("example.com") - r = self.get_success(d) + r = self.get_success(self.store.get_destination_retry_timings("example.com")) self.assertEqual( DestinationRetryTimings( -- cgit 1.5.1 From c7e9c1d5ae2fd0fa68b28c51a3bce503194c4718 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 May 2023 15:13:28 +0100 Subject: Speed up user directory rebuild for users some more... (#15665) --- changelog.d/15665.misc | 1 + synapse/storage/databases/main/user_directory.py | 190 ++++++++++++++--------- 2 files changed, 115 insertions(+), 76 deletions(-) create mode 100644 changelog.d/15665.misc diff --git a/changelog.d/15665.misc b/changelog.d/15665.misc new file mode 100644 index 0000000000..7ad424d8df --- /dev/null +++ b/changelog.d/15665.misc @@ -0,0 +1 @@ +Speed up rebuilding of the user directory for local users. diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index b7d58978de..a0319575f0 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -17,6 +17,7 @@ import re import unicodedata from typing import ( TYPE_CHECKING, + Collection, Iterable, List, Mapping, @@ -45,7 +46,7 @@ from synapse.util.stringutils import non_null_str_or_none if TYPE_CHECKING: from synapse.server import HomeServer -from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules +from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, UserTypes from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -356,13 +357,30 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): Add all local users to the user directory. """ - def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: - sql = "SELECT user_id FROM %s LIMIT %s" % ( - TEMP_TABLE + "_users", - str(batch_size), - ) - txn.execute(sql) - user_result = cast(List[Tuple[str]], txn.fetchall()) + def _populate_user_directory_process_users_txn( + txn: LoggingTransaction, + ) -> Optional[int]: + if self.database_engine.supports_returning: + # Note: we use an ORDER BY in the SELECT to force usage of an + # index. Otherwise, postgres does a sequential scan that is + # surprisingly slow (I think due to the fact it will read/skip + # over lots of already deleted rows). + sql = f""" + DELETE FROM {TEMP_TABLE + "_users"} + WHERE user_id IN ( + SELECT user_id FROM {TEMP_TABLE + "_users"} ORDER BY user_id LIMIT ? 
+ ) + RETURNING user_id + """ + txn.execute(sql, (batch_size,)) + user_result = cast(List[Tuple[str]], txn.fetchall()) + else: + sql = "SELECT user_id FROM %s ORDER BY user_id LIMIT %s" % ( + TEMP_TABLE + "_users", + str(batch_size), + ) + txn.execute(sql) + user_result = cast(List[Tuple[str]], txn.fetchall()) if not user_result: return None @@ -378,85 +396,81 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): assert count_result is not None progress["remaining"] = count_result[0] - return users_to_work_on - - users_to_work_on = await self.db_pool.runInteraction( - "populate_user_directory_temp_read", _get_next_batch - ) + if not users_to_work_on: + return None - # No more users -- complete the transaction. - if not users_to_work_on: - await self.db_pool.updates._end_background_update( - "populate_user_directory_process_users" + logger.debug( + "Processing the next %d users of %d remaining", + len(users_to_work_on), + progress["remaining"], ) - return 1 - - logger.debug( - "Processing the next %d users of %d remaining" - % (len(users_to_work_on), progress["remaining"]) - ) - # First filter down to users we want to insert into the user directory. - users_to_insert = [ - user_id - for user_id in users_to_work_on - if await self.should_include_local_user_in_dir(user_id) - ] + # First filter down to users we want to insert into the user directory. + users_to_insert = self._filter_local_users_for_dir_txn( + txn, users_to_work_on + ) - # Next fetch their profiles. Note that the `user_id` here is the - # *localpart*, and that not all users have profiles. - profile_rows = await self.db_pool.simple_select_many_batch( - table="profiles", - column="user_id", - iterable=[get_localpart_from_id(u) for u in users_to_insert], - retcols=( - "user_id", - "displayname", - "avatar_url", - ), - keyvalues={}, - desc="populate_user_directory_process_users_get_profiles", - ) - profiles = { - f"@{row['user_id']}:{self.server_name}": _UserDirProfile( - f"@{row['user_id']}:{self.server_name}", - row["displayname"], - row["avatar_url"], + # Next fetch their profiles. Note that the `user_id` here is the + # *localpart*, and that not all users have profiles. + profile_rows = self.db_pool.simple_select_many_txn( + txn, + table="profiles", + column="user_id", + iterable=[get_localpart_from_id(u) for u in users_to_insert], + retcols=( + "user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, ) - for row in profile_rows - } + profiles = { + f"@{row['user_id']}:{self.server_name}": _UserDirProfile( + f"@{row['user_id']}:{self.server_name}", + row["displayname"], + row["avatar_url"], + ) + for row in profile_rows + } - profiles_to_insert = [ - profiles.get(user_id) or _UserDirProfile(user_id) - for user_id in users_to_insert - ] + profiles_to_insert = [ + profiles.get(user_id) or _UserDirProfile(user_id) + for user_id in users_to_insert + ] + + # Actually insert the users with their profiles into the directory. + self._update_profiles_in_user_dir_txn(txn, profiles_to_insert) + + # We've finished processing the users. Delete it from the table, if + # we haven't already. + if not self.database_engine.supports_returning: + self.db_pool.simple_delete_many_txn( + txn, + table=TEMP_TABLE + "_users", + column="user_id", + values=users_to_work_on, + keyvalues={}, + ) - # Actually insert the users with their profiles into the directory. 
- await self.db_pool.runInteraction( - "populate_user_directory_process_users_insertion", - self._update_profiles_in_user_dir_txn, - profiles_to_insert, - ) + # Update the remaining counter. + progress["remaining"] -= len(users_to_work_on) + self.db_pool.updates._background_update_progress_txn( + txn, "populate_user_directory_process_users", progress + ) + return len(users_to_work_on) - # We've finished processing the users. Delete it from the table. - await self.db_pool.simple_delete_many( - table=TEMP_TABLE + "_users", - column="user_id", - iterable=users_to_work_on, - keyvalues={}, - desc="populate_user_directory_process_users_delete", + processed_count = await self.db_pool.runInteraction( + "populate_user_directory_temp", _populate_user_directory_process_users_txn ) - # Update the remaining counter. - progress["remaining"] -= len(users_to_work_on) - await self.db_pool.runInteraction( - "populate_user_directory", - self.db_pool.updates._background_update_progress_txn, - "populate_user_directory_process_users", - progress, - ) + # No more users -- complete the transaction. + if not processed_count: + await self.db_pool.updates._end_background_update( + "populate_user_directory_process_users" + ) + return 1 - return len(users_to_work_on) + return processed_count async def should_include_local_user_in_dir(self, user: str) -> bool: """Certain classes of local user are omitted from the user directory. @@ -494,6 +508,30 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): return True + def _filter_local_users_for_dir_txn( + self, txn: LoggingTransaction, users: Collection[str] + ) -> Collection[str]: + """A batched version of `should_include_local_user_in_dir`""" + users = [ + user + for user in users + if self.get_app_service_by_user_id(user) is None # type: ignore[attr-defined] + and not self.get_if_app_services_interested_in_user(user) # type: ignore[attr-defined] + ] + + rows = self.db_pool.simple_select_many_txn( + txn, + table="users", + column="name", + iterable=users, + keyvalues={ + "deactivated": 0, + }, + retcols=("name", "user_type"), + ) + + return [row["name"] for row in rows if row["user_type"] != UserTypes.SUPPORT] + async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool: """Check if the room is either world_readable or publically joinable""" -- cgit 1.5.1 From ca5c4be92166775ec1de9e79a04db1e136609a1f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 24 May 2023 10:18:52 -0400 Subject: Add type hints to test_descriptors. (#15659) Require type hints in test_descriptors and add missing ones. --- changelog.d/15659.misc | 1 + mypy.ini | 3 - tests/util/caches/test_descriptors.py | 197 ++++++++++++++++++---------------- 3 files changed, 105 insertions(+), 96 deletions(-) create mode 100644 changelog.d/15659.misc diff --git a/changelog.d/15659.misc b/changelog.d/15659.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15659.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 3363c6daee..a7ec66196d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,9 +32,6 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False -[mypy-tests.util.caches.test_descriptors] -disallow_untyped_defs = False - ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. 
;; The `typeshed` project maintains stubs here: diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 13f1edd533..064f4987df 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -13,7 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Iterable, Set, Tuple, cast +from typing import ( + Any, + Dict, + Generator, + Iterable, + List, + NoReturn, + Optional, + Set, + Tuple, + cast, +) from unittest import mock from twisted.internet import defer, reactor @@ -29,7 +40,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.descriptors import _CacheContext, cached, cachedList from tests import unittest from tests.test_utils import get_awaitable_result @@ -37,21 +48,21 @@ from tests.test_utils import get_awaitable_result logger = logging.getLogger(__name__) -def run_on_reactor(): - d: "Deferred[int]" = defer.Deferred() +def run_on_reactor() -> "Deferred[int]": + d: "Deferred[int]" = Deferred() cast(IReactorTime, reactor).callLater(0, d.callback, 0) return make_deferred_yieldable(d) class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks - def test_cache(self): + def test_cache(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> str: return self.mock(arg1, arg2) obj = Cls() @@ -77,15 +88,15 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_num_args(self): + def test_cache_num_args(self) -> Generator["Deferred[Any]", object, None]: """Only the first num_args arguments should matter to the cache""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached(num_args=1) - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> mock.Mock: return self.mock(arg1, arg2) obj = Cls() @@ -111,7 +122,7 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_uncached_args(self): + def test_cache_uncached_args(self) -> Generator["Deferred[Any]", object, None]: """ Only the arguments not named in uncached_args should matter to the cache @@ -123,10 +134,10 @@ class DescriptorTestCase(unittest.TestCase): # Note that it is important that this is not the last argument to # test behaviour of skipping arguments properly. 
@descriptors.cached(uncached_args=("arg2",)) - def fn(self, arg1, arg2, arg3): + def fn(self, arg1: int, arg2: int, arg3: int) -> str: return self.mock(arg1, arg2, arg3) - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() obj = Cls() @@ -152,15 +163,15 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_kwargs(self): + def test_cache_kwargs(self) -> Generator["Deferred[Any]", object, None]: """Test that keyword arguments are treated properly""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, kwarg1=2): + def fn(self, arg1: int, kwarg1: int = 2) -> str: return self.mock(arg1, kwarg1=kwarg1) obj = Cls() @@ -188,12 +199,12 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "fish") obj.mock.assert_not_called() - def test_cache_with_sync_exception(self): + def test_cache_with_sync_exception(self) -> None: """If the wrapped function throws synchronously, things should continue to work""" class Cls: @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> NoReturn: raise SynapseError(100, "mai spoon iz too big!!1") obj = Cls() @@ -209,15 +220,15 @@ class DescriptorTestCase(unittest.TestCase): d = obj.fn(1) self.failureResultOf(d, SynapseError) - def test_cache_with_async_exception(self): + def test_cache_with_async_exception(self) -> None: """The wrapped function returns a failure""" class Cls: - result = None + result: Optional[Deferred] = None call_count = 0 @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> Optional[Deferred]: self.call_count += 1 return self.result @@ -225,7 +236,7 @@ class DescriptorTestCase(unittest.TestCase): callbacks: Set[str] = set() # set off an asynchronous request - origin_d: Deferred = defer.Deferred() + origin_d: Deferred = Deferred() obj.result = origin_d d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1")) @@ -260,17 +271,17 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(self.successResultOf(d3), 100) self.assertEqual(obj.call_count, 2) - def test_cache_logcontexts(self): + def test_cache_logcontexts(self) -> Deferred: """Check that logcontexts are set and restored correctly when using the cache.""" - complete_lookup: Deferred = defer.Deferred() + complete_lookup: Deferred = Deferred() class Cls: @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> "Deferred[int]": @defer.inlineCallbacks - def inner_fn(): + def inner_fn() -> Generator["Deferred[object]", object, int]: with PreserveLoggingContext(): yield complete_lookup return 1 @@ -278,13 +289,13 @@ class DescriptorTestCase(unittest.TestCase): return inner_fn() @defer.inlineCallbacks - def do_lookup(): + def do_lookup() -> Generator["Deferred[Any]", object, int]: with LoggingContext("c1") as c1: r = yield obj.fn(1) self.assertEqual(current_context(), c1) - return r + return cast(int, r) - def check_result(r): + def check_result(r: int) -> None: self.assertEqual(r, 1) obj = Cls() @@ -304,15 +315,15 @@ class DescriptorTestCase(unittest.TestCase): return defer.gatherResults([d1, d2]) - def test_cache_logcontexts_with_exception(self): + def test_cache_logcontexts_with_exception(self) -> "Deferred[None]": """Check that the cache sets and restores logcontexts correctly when the lookup function throws an exception""" class Cls: @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> Deferred: @defer.inlineCallbacks - def inner_fn(): + def inner_fn() -> 
Generator["Deferred[Any]", object, NoReturn]: # we want this to behave like an asynchronous function yield run_on_reactor() raise SynapseError(400, "blah") @@ -320,7 +331,7 @@ class DescriptorTestCase(unittest.TestCase): return inner_fn() @defer.inlineCallbacks - def do_lookup(): + def do_lookup() -> Generator["Deferred[object]", object, None]: with LoggingContext("c1") as c1: try: d = obj.fn(1) @@ -347,13 +358,13 @@ class DescriptorTestCase(unittest.TestCase): return d1 @defer.inlineCallbacks - def test_cache_default_args(self): + def test_cache_default_args(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2=2, arg3=3): + def fn(self, arg1: int, arg2: int = 2, arg3: int = 3) -> str: return self.mock(arg1, arg2, arg3) obj = Cls() @@ -384,13 +395,13 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() - def test_cache_iterable(self): + def test_cache_iterable(self) -> None: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached(iterable=True) - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> List[str]: return self.mock(arg1, arg2) obj = Cls() @@ -417,12 +428,12 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r.result, ["chips"]) obj.mock.assert_not_called() - def test_cache_iterable_with_sync_exception(self): + def test_cache_iterable_with_sync_exception(self) -> None: """If the wrapped function throws synchronously, things should continue to work""" class Cls: @descriptors.cached(iterable=True) - def fn(self, arg1): + def fn(self, arg1: int) -> NoReturn: raise SynapseError(100, "mai spoon iz too big!!1") obj = Cls() @@ -438,20 +449,20 @@ class DescriptorTestCase(unittest.TestCase): d = obj.fn(1) self.failureResultOf(d, SynapseError) - def test_invalidate_cascade(self): + def test_invalidate_cascade(self) -> None: """Invalidations should cascade up through cache contexts""" class Cls: @cached(cache_context=True) - async def func1(self, key, cache_context): + async def func1(self, key: str, cache_context: _CacheContext) -> int: return await self.func2(key, on_invalidate=cache_context.invalidate) @cached(cache_context=True) - async def func2(self, key, cache_context): + async def func2(self, key: str, cache_context: _CacheContext) -> int: return await self.func3(key, on_invalidate=cache_context.invalidate) @cached(cache_context=True) - async def func3(self, key, cache_context): + async def func3(self, key: str, cache_context: _CacheContext) -> int: self.invalidate = cache_context.invalidate return 42 @@ -463,13 +474,13 @@ class DescriptorTestCase(unittest.TestCase): obj.invalidate() top_invalidate.assert_called_once() - def test_cancel(self): + def test_cancel(self) -> None: """Test that cancelling a lookup does not cancel other lookups""" complete_lookup: "Deferred[None]" = Deferred() class Cls: @cached() - async def fn(self, arg1): + async def fn(self, arg1: int) -> str: await complete_lookup return str(arg1) @@ -488,7 +499,7 @@ class DescriptorTestCase(unittest.TestCase): self.failureResultOf(d1, CancelledError) self.assertEqual(d2.result, "123") - def test_cancel_logcontexts(self): + def test_cancel_logcontexts(self) -> None: """Test that cancellation does not break logcontexts. * The `CancelledError` must be raised with the correct logcontext. 
@@ -501,14 +512,14 @@ class DescriptorTestCase(unittest.TestCase): inner_context_was_finished = False @cached() - async def fn(self, arg1): + async def fn(self, arg1: int) -> str: await make_deferred_yieldable(complete_lookup) self.inner_context_was_finished = current_context().finished return str(arg1) obj = Cls() - async def do_lookup(): + async def do_lookup() -> None: with LoggingContext("c1") as c1: try: await obj.fn(123) @@ -542,10 +553,10 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): """ @defer.inlineCallbacks - def test_passthrough(self): + def test_passthrough(self) -> Generator["Deferred[Any]", object, None]: class A: @cached() - def func(self, key): + def func(self, key: str) -> str: return key a = A() @@ -554,12 +565,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual((yield a.func("bar")), "bar") @defer.inlineCallbacks - def test_hit(self): + def test_hit(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @@ -572,12 +583,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 1) @defer.inlineCallbacks - def test_invalidate(self): + def test_invalidate(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @@ -592,21 +603,21 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 2) - def test_invalidate_missing(self): + def test_invalidate_missing(self) -> None: class A: @cached() - def func(self, key): + def func(self, key: str) -> str: return key A().func.invalidate(("what",)) @defer.inlineCallbacks - def test_max_entries(self): + def test_max_entries(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached(max_entries=10) - def func(self, key): + def func(self, key: int) -> int: callcount[0] += 1 return key @@ -626,14 +637,14 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) ) - def test_prefill(self): + def test_prefill(self) -> None: callcount = [0] d = defer.succeed(123) class A: @cached() - def func(self, key): + def func(self, key: str) -> "Deferred[int]": callcount[0] += 1 return d @@ -645,18 +656,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 0) @defer.inlineCallbacks - def test_invalidate_context(self): + def test_invalidate_context(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -678,18 +689,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount2[0], 2) @defer.inlineCallbacks - def test_eviction_context(self): + def test_eviction_context(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached(max_entries=2) - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: 
_CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -715,18 +726,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount2[0], 3) @defer.inlineCallbacks - def test_double_get(self): + def test_double_get(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -763,17 +774,17 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks - def test_cache(self): + def test_cache(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1, arg2): + async def list_fn(self, args1: Iterable[int], arg2: int) -> Dict[int, str]: context = current_context() assert isinstance(context, LoggingContext) assert context.name == "c1" @@ -824,19 +835,19 @@ class CachedListDescriptorTestCase(unittest.TestCase): obj.mock.assert_called_once_with({40}, 2) self.assertEqual(r, {10: "fish", 40: "gravy"}) - def test_concurrent_lookups(self): + def test_concurrent_lookups(self) -> None: """All concurrent lookups should get the same result""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - def list_fn(self, args1) -> "Deferred[dict]": + def list_fn(self, args1: List[int]) -> "Deferred[dict]": return self.mock(args1) obj = Cls() @@ -867,19 +878,19 @@ class CachedListDescriptorTestCase(unittest.TestCase): self.assertEqual(self.successResultOf(d3), {10: "peas"}) @defer.inlineCallbacks - def test_invalidate(self): + def test_invalidate(self) -> Generator["Deferred[Any]", object, None]: """Make sure that invalidation callbacks are called.""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1, arg2): + async def list_fn(self, args1: List[int], arg2: int) -> Dict[int, str]: # we want this to behave like an asynchronous function await run_on_reactor() return self.mock(args1, arg2) @@ -908,17 +919,17 @@ class CachedListDescriptorTestCase(unittest.TestCase): invalidate0.assert_called_once() invalidate1.assert_called_once() - def test_cancel(self): + def test_cancel(self) -> None: """Test that cancelling a lookup does not cancel other lookups""" complete_lookup: "Deferred[None]" = Deferred() class Cls: @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args): + async def list_fn(self, args: List[int]) -> Dict[int, str]: await complete_lookup return {arg: str(arg) for arg in args} @@ -936,7 +947,7 @@ 
class CachedListDescriptorTestCase(unittest.TestCase): self.failureResultOf(d1, CancelledError) self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"}) - def test_cancel_logcontexts(self): + def test_cancel_logcontexts(self) -> None: """Test that cancellation does not break logcontexts. * The `CancelledError` must be raised with the correct logcontext. @@ -949,18 +960,18 @@ class CachedListDescriptorTestCase(unittest.TestCase): inner_context_was_finished = False @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args): + async def list_fn(self, args: List[int]) -> Dict[int, str]: await make_deferred_yieldable(complete_lookup) self.inner_context_was_finished = current_context().finished return {arg: str(arg) for arg in args} obj = Cls() - async def do_lookup(): + async def do_lookup() -> None: with LoggingContext("c1") as c1: try: await obj.list_fn([123]) @@ -983,7 +994,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): ) self.assertEqual(current_context(), SENTINEL_CONTEXT) - def test_num_args_mismatch(self): + def test_num_args_mismatch(self) -> None: """ Make sure someone does not accidentally use @cachedList on a method with a mismatch in the number args to the underlying single cache method. @@ -991,14 +1002,14 @@ class CachedListDescriptorTestCase(unittest.TestCase): class Cls: @descriptors.cached(tree=True) - def fn(self, room_id, event_id): + def fn(self, room_id: str, event_id: str) -> None: pass # This is wrong ❌. `@cachedList` expects to be given the same number # of arguments as the underlying cached function, just with one of # the arguments being an iterable @descriptors.cachedList(cached_method_name="fn", list_name="keys") - def list_fn(self, keys: Iterable[Tuple[str, str]]): + def list_fn(self, keys: Iterable[Tuple[str, str]]) -> None: pass # Corrected syntax ✅ -- cgit 1.5.1 From 8839b6c2f8b07d5d122a15e79b1ebdbdd5f3e26b Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 24 May 2023 13:23:26 -0700 Subject: Add requesting user id parameter to key claim methods in `TransportLayerClient` (#15663) --- changelog.d/15663.misc | 1 + synapse/federation/federation_client.py | 6 ++++-- synapse/federation/transport/client.py | 16 +++++++++++++--- synapse/handlers/e2e_keys.py | 3 ++- synapse/rest/client/keys.py | 8 ++++---- tests/handlers/test_e2e_keys.py | 16 +++++++++++++++- 6 files changed, 39 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15663.misc diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc new file mode 100644 index 0000000000..cc5f801543 --- /dev/null +++ b/changelog.d/15663.misc @@ -0,0 +1 @@ +Add requesting user id parameter to key claim methods in `TransportLayerClient`. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 076b9287c6..a2cf3a96c6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -236,6 +236,7 @@ class FederationClient(FederationBase): async def claim_client_keys( self, + user: UserID, destination: str, query: Dict[str, Dict[str, Dict[str, int]]], timeout: Optional[int], @@ -243,6 +244,7 @@ class FederationClient(FederationBase): """Claims one-time keys for a device hosted on a remote server. Args: + user: The user id of the requesting user destination: Domain name of the remote homeserver content: The query content. 
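The net effect of this change on call sites is uniform: the requesting user's `UserID` is threaded through as a new first argument. A hedged before/after sketch, where the destination, device, and algorithm values are illustrative:

```python
from synapse.types import UserID

requester = UserID.from_string("@alice:example.com")
destination = "remote.example"
query = {"@bob:remote.example": {"DEVICE_ID": {"signed_curve25519": 1}}}

# Before this patch:
#     await federation_client.claim_client_keys(destination, query, timeout=10_000)
# After:
#     await federation_client.claim_client_keys(requester, destination, query, timeout=10_000)
```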
@@ -279,7 +281,7 @@ class FederationClient(FederationBase): if use_unstable: try: return await self.transport_layer.claim_client_keys_unstable( - destination, unstable_content, timeout + user, destination, unstable_content, timeout ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, @@ -295,7 +297,7 @@ class FederationClient(FederationBase): logger.debug("Skipping unstable claim client keys API") return await self.transport_layer.claim_client_keys( - destination, content, timeout + user, destination, content, timeout ) @trace diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1cfc4446c4..0b17f713ea 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -45,7 +45,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.federation.units import Transaction from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser from synapse.http.types import QueryParams -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import ExceptionBundle if TYPE_CHECKING: @@ -630,7 +630,11 @@ class TransportLayerClient: ) async def claim_client_keys( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -655,6 +659,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. Returns: @@ -671,7 +676,11 @@ class TransportLayerClient: ) async def claim_client_keys_unstable( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -696,6 +705,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. 
Returns: diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 24741b667b..ad075497c8 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -661,6 +661,7 @@ class E2eKeysHandler: async def claim_one_time_keys( self, query: Dict[str, Dict[str, Dict[str, int]]], + user: UserID, timeout: Optional[int], always_include_fallback_keys: bool, ) -> JsonDict: @@ -703,7 +704,7 @@ class E2eKeysHandler: device_keys = remote_queries[destination] try: remote_result = await self.federation.claim_client_keys( - destination, device_keys, timeout=timeout + user, destination, device_keys, timeout=timeout ) for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9bbab5e624..413edd8a4d 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -287,7 +287,7 @@ class OneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -298,7 +298,7 @@ class OneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = {algorithm: 1} result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=False + query, requester.user, timeout, always_include_fallback_keys=False ) return 200, result @@ -335,7 +335,7 @@ class UnstableOneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -346,7 +346,7 @@ class UnstableOneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = Counter(algorithms) result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=True + query, requester.user, timeout, always_include_fallback_keys=True ) return 200, result diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 72d0584061..2eaffe511e 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -27,7 +27,7 @@ from synapse.appservice import ApplicationService from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer from synapse.storage.databases.main.appservice import _make_exclusive_regex -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import Clock from tests import unittest @@ -45,6 +45,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main + self.requester = UserID.from_string(f"@test_requester:{self.hs.hostname}") def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" @@ -161,6 +162,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res2 = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: 
{"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -206,6 +208,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -225,6 +228,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -274,6 +278,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -286,6 +291,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -307,6 +313,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -348,6 +355,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -370,6 +378,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1080,6 +1089,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}, device_id_2: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -1125,6 +1135,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1169,6 +1180,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1202,6 +1214,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1229,6 +1242,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) -- cgit 1.5.1 From 77156a4bc1f87e98754e3f7f86e52a84a4253a10 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 May 2023 23:22:24 -0500 Subject: Process previously failed backfill events in the background (#15585) Process previously failed backfill events in the background because they are bound to fail again and we don't need to waste time holding up the request for something that 
is bound to fail again. Fix https://github.com/matrix-org/synapse/issues/13623 Follow-up to https://github.com/matrix-org/synapse/issues/13621 and https://github.com/matrix-org/synapse/issues/13622 Part of making `/messages` faster: https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/15585.feature | 1 + synapse/handlers/federation_event.py | 70 ++++++++++++++-- synapse/storage/databases/main/event_federation.py | 31 ++++++- synapse/util/iterutils.py | 27 ++++++ tests/handlers/test_federation_event.py | 95 ++++++++++++++++++++++ tests/storage/test_event_federation.py | 37 +++++++++ 6 files changed, 252 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15585.feature diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature new file mode 100644 index 0000000000..1adcfb69ee --- /dev/null +++ b/changelog.d/15585.feature @@ -0,0 +1 @@ +Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9a08618da5..42141d3670 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -88,7 +88,7 @@ from synapse.types import ( ) from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer, concurrently_execute -from synapse.util.iterutils import batch_iter +from synapse.util.iterutils import batch_iter, partition from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -865,7 +865,7 @@ class FederationEventHandler: [event.event_id for event in events] ) - new_events = [] + new_events: List[EventBase] = [] for event in events: event_id = event.event_id @@ -895,12 +895,66 @@ class FederationEventHandler: str(len(new_events)), ) - # We want to sort these by depth so we process them and - # tell clients about them in order. - sorted_events = sorted(new_events, key=lambda x: x.depth) - for ev in sorted_events: - with nested_logging_context(ev.event_id): - await self._process_pulled_event(origin, ev, backfilled=backfilled) + @trace + async def _process_new_pulled_events(new_events: Collection[EventBase]) -> None: + # We want to sort these by depth so we process them and tell clients about + # them in order. It's also more efficient to backfill this way (`depth` + # ascending) because one backfill event is likely to be the `prev_event` of + # the next event we're going to process. + sorted_events = sorted(new_events, key=lambda x: x.depth) + for ev in sorted_events: + with nested_logging_context(ev.event_id): + await self._process_pulled_event(origin, ev, backfilled=backfilled) + + # Check if we've already tried to process these events at some point in the + # past. We aren't concerned with the expontntial backoff here, just whether it + # has failed to be processed before. + event_ids_with_failed_pull_attempts = ( + await self._store.get_event_ids_with_failed_pull_attempts( + [event.event_id for event in new_events] + ) + ) + + # We construct the event lists in source order from `/backfill` response because + # it's a) easiest, but also b) the order in which we process things matters for + # MSC2716 historical batches because many historical events are all at the same + # `depth` and we rely on the tenuous sort that the other server gave us and hope + # they're doing their best. 
The brittle nature of this ordering for historical + # messages over federation is one of the reasons why we don't want to continue + # on MSC2716 until we have online topological ordering. + events_with_failed_pull_attempts, fresh_events = partition( + new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "events_with_failed_pull_attempts", + str(event_ids_with_failed_pull_attempts), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "events_with_failed_pull_attempts.length", + str(len(events_with_failed_pull_attempts)), + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "fresh_events", + str([event.event_id for event in fresh_events]), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "fresh_events.length", + str(len(fresh_events)), + ) + + # Process previously failed backfill events in the background to not waste + # time on something that is likely to fail again. + if len(events_with_failed_pull_attempts) > 0: + run_as_background_process( + "_process_new_pulled_events_with_failed_pull_attempts", + _process_new_pulled_events, + events_with_failed_pull_attempts, + ) + + # We can optimistically try to process and wait for the event to be fully + # persisted if we've never tried before. + if len(fresh_events) > 0: + await _process_new_pulled_events(fresh_events) @trace @tag_args diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ac19de183c..2681917d0b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -46,7 +46,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1583,6 +1583,35 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause)) + @trace + async def get_event_ids_with_failed_pull_attempts( + self, event_ids: StrCollection + ) -> Set[str]: + """ + Filter the given list of `event_ids` and return events which have any failed + pull attempts. + + Args: + event_ids: A list of events to filter down. + + Returns: + A filtered down list of `event_ids` that have previous failed pull attempts. 
+ """ + + rows = await self.db_pool.simple_select_many_batch( + table="event_failed_pull_attempts", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id",), + desc="get_event_ids_with_failed_pull_attempts", + ) + event_ids_with_failed_pull_attempts: Set[str] = { + row["event_id"] for row in rows + } + + return event_ids_with_failed_pull_attempts + @trace async def get_event_ids_to_not_pull_from_backoff( self, diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 4938ddf703..a0efb96d3b 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -15,11 +15,13 @@ import heapq from itertools import islice from typing import ( + Callable, Collection, Dict, Generator, Iterable, Iterator, + List, Mapping, Set, Sized, @@ -71,6 +73,31 @@ def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]: return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen)) +def partition( + iterable: Iterable[T], predicate: Callable[[T], bool] +) -> Tuple[List[T], List[T]]: + """ + Separate a given iterable into two lists based on the result of a predicate function. + + Args: + iterable: the iterable to partition (separate) + predicate: a function that takes an item from the iterable and returns a boolean + + Returns: + A tuple of two lists, the first containing all items for which the predicate + returned True, the second containing all items for which the predicate returned + False + """ + true_results = [] + false_results = [] + for item in iterable: + if predicate(item): + true_results.append(item) + else: + false_results.append(item) + return true_results, false_results + + def sorted_topologically( nodes: Iterable[T], graph: Mapping[T, Collection[T]], diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index c067e5bfe3..23f1b33b2f 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -664,6 +664,101 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): StoreError, ) + def test_backfill_process_previously_failed_pull_attempt_event_in_the_background( + self, + ) -> None: + """ + Sanity check that events are still processed even if it is in the background + for events that already have failed pull attempts. 
+ """ + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # Allow the remote user to send state events + self.helper.send_state( + room_id, + "m.room.power_levels", + {"events_default": 0, "state_default": 0}, + tok=tok, + ) + + # Add the remote user to the room + member_event = self.get_success( + event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join") + ) + + initial_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + + auth_event_ids = [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + member_event.event_id, + ] + + # Create a regular event that should process + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "test_regular_type", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [ + member_event.event_id, + ], + "auth_events": auth_event_ids, + "origin_server_ts": 1, + "depth": 12, + "content": {"body": "pulled_event"}, + } + ), + room_version, + ) + + # Record a failed pull attempt for this event which will cause us to backfill it + # in the background from here on out. + self.get_success( + main_store.record_event_failed_pull_attempt( + room_id, pulled_event.event_id, "fake cause" + ) + ) + + # We expect an outbound request to /backfill, so stub that out + self.mock_federation_transport_client.backfill.return_value = make_awaitable( + { + "origin": self.OTHER_SERVER_NAME, + "origin_server_ts": 123, + "pdus": [ + pulled_event.get_pdu_json(), + ], + } + ) + + # The function under test: try to backfill and process the pulled event + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler().backfill( + self.OTHER_SERVER_NAME, + room_id, + limit=1, + extremities=["$some_extremity"], + ) + ) + + # Ensure `run_as_background_process(...)` has a chance to run (essentially + # `wait_for_background_processes()`) + self.reactor.pump((0.1,)) + + # Make sure we processed and persisted the pulled event + self.get_success(main_store.get_event(pulled_event.event_id, allow_none=False)) + def test_process_pulled_event_with_rejected_missing_state(self) -> None: """Ensure that we correctly handle pulled events with missing state containing a rejected state event diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 81e50bdd55..4b8d8328d7 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -1134,6 +1134,43 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertEqual(backfill_event_ids, ["insertion_eventA"]) + def test_get_event_ids_with_failed_pull_attempts(self) -> None: + """ + Test to make sure we properly get event_ids based on whether they have any + failed pull attempts. 
+ """ + # Create the room + user_id = self.register_user("alice", "test") + tok = self.login("alice", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id1", "fake cause" + ) + ) + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id2", "fake cause" + ) + ) + + event_ids_with_failed_pull_attempts = self.get_success( + self.store.get_event_ids_with_failed_pull_attempts( + event_ids=[ + "$failed_event_id1", + "$fresh_event_id1", + "$failed_event_id2", + "$fresh_event_id2", + ] + ) + ) + + self.assertEqual( + event_ids_with_failed_pull_attempts, + {"$failed_event_id1", "$failed_event_id2"}, + ) + def test_get_event_ids_to_not_pull_from_backoff(self) -> None: """ Test to make sure only event IDs we should backoff from are returned. -- cgit 1.5.1 From 2d8a2ca374916e8a24ff43355c0ad24d456fab25 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 10:53:10 +0000 Subject: Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. (#15673) * Add dch and notify-send to the Nix dev flake * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15673.misc | 1 + flake.nix | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/15673.misc diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc new file mode 100644 index 0000000000..52148fc63f --- /dev/null +++ b/changelog.d/15673.misc @@ -0,0 +1 @@ +Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 7351571e61..8c7a4f8769 100644 --- a/flake.nix +++ b/flake.nix @@ -100,6 +100,10 @@ # For building the Synapse documentation website. mdbook + + # For releasing Synapse + debian-devscripts # (`dch` for manipulating the Debian changelog) + libnotify # (the release script uses `notify-send` to tell you when CI jobs are done) ]; # Install Python and manage a virtualenv with Poetry. -- cgit 1.5.1 From 4e013093a87094c711eb047a41e2de3807c7873e Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 26 May 2023 05:46:13 -0600 Subject: Add MSC3820 (room version 11) option 2 unstable room version. (#15666) --- changelog.d/15666.misc | 1 + synapse/api/room_versions.py | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/15666.misc diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc new file mode 100644 index 0000000000..92eae49952 --- /dev/null +++ b/changelog.d/15666.misc @@ -0,0 +1 @@ +Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. 
\ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 7030b133d3..035a14171b 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -485,6 +485,30 @@ class RoomVersions: msc3931_push_features=(), msc3989_redaction_rules=True, ) + MSC3820opt2 = RoomVersion( + # Based upon v10 + "org.matrix.msc3820.opt2", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=True, # Used by MSC3820 + msc2176_redaction_rules=True, # Used by MSC3820 + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, + msc3821_redaction_rules=True, # Used by MSC3820 + msc3931_push_features=(), + msc3989_redaction_rules=True, # Used by MSC3820 + ) KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { -- cgit 1.5.1 From c775d80b73b7930b9541e353fc24dcef66579e48 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 14:28:55 +0000 Subject: Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. (#15672) * Fix #15669: always populate instance map even if it was empty * Fix some tests * Fix more tests * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * CI fix: don't forget to update apt repository sources before installing olddeps deps * Add test testing the backwards compatibility --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- .github/workflows/tests.yml | 1 + changelog.d/15672.bugfix | 1 + synapse/config/workers.py | 2 +- tests/app/test_homeserver_start.py | 2 ++ tests/app/test_openid_listener.py | 1 + tests/config/test_workers.py | 43 +++++++++++++++++++++++++++++--- tests/replication/test_federation_ack.py | 1 + tests/storage/test_rollback_worker.py | 1 + 8 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15672.bugfix diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 51cbeb3298..ce3a57fb01 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -314,6 +314,7 @@ jobs: # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | + sudo apt-get -qq update sudo apt-get -qq install build-essential libffi-dev python-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix new file mode 100644 index 0000000000..c81d7332b7 --- /dev/null +++ b/changelog.d/15672.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. \ No newline at end of file diff --git a/synapse/config/workers.py b/synapse/config/workers.py index d2311cc857..38e13dd7b5 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -222,7 +222,7 @@ class WorkerConfig(Config): # itself doesn't need this data as it would never have to talk to itself. 
instance_map: Dict[str, Any] = config.get("instance_map", {}) - if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: + if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: # The host used to connect to the main synapse main_host = config.get("worker_replication_host", None) diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index 788c935537..cd117b7394 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -25,6 +25,8 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): # Add a blank line as otherwise the next addition ends up on a line with a comment self.add_lines_to_config([" "]) self.add_lines_to_config(["worker_app: test_worker_app"]) + self.add_lines_to_config(["worker_replication_host: 127.0.0.1"]) + self.add_lines_to_config(["worker_replication_http_port: 0"]) # Ensure that starting master process with worker config raises an exception with self.assertRaises(ConfigError): diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 2ee343d8a4..056d9402a4 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -42,6 +42,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index 49a6bdf408..086359fd71 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from immutabledict import immutabledict from synapse.config import ConfigError -from synapse.config.workers import WorkerConfig +from synapse.config.workers import InstanceLocationConfig, WorkerConfig from tests.unittest import TestCase @@ -94,6 +94,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -138,7 +139,9 @@ class WorkerDutyConfigTestCase(TestCase): """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -203,6 +206,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -236,7 +240,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the master's config. """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -262,7 +268,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the worker's config. 
""" appservice_worker_config = self._make_worker_config( - worker_app="synapse.app.generic_worker", worker_name="worker1" + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -298,6 +306,7 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertFalse(worker1_config.should_notify_appservices) @@ -309,7 +318,33 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) + + def test_worker_instance_map_compat(self) -> None: + """ + Test that `worker_replication_*` settings are compatibly handled by + adding them to the instance map as a `main` entry. + """ + + worker1_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={ + "notify_appservices_from_worker": "worker2", + "update_user_directory_from_worker": "worker1", + "worker_replication_host": "127.0.0.42", + "worker_replication_http_port": 1979, + }, + ) + self.assertEqual( + worker1_config.instance_map, + { + "master": InstanceLocationConfig( + host="127.0.0.42", port=1979, tls=False + ), + }, + ) diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 12668b34c5..cf59b1a204 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -32,6 +32,7 @@ class FederationAckTestCase(HomeserverTestCase): config["worker_app"] = "synapse.app.generic_worker" config["worker_name"] = "federation_sender1" config["federation_sender_instances"] = ["federation_sender1"] + config["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return config def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 966aafea6f..6861d3a6c9 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -55,6 +55,7 @@ class WorkerSchemaTests(HomeserverTestCase): # Mark this as a worker app. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf -- cgit 1.5.1 From 65bf5f3649fd108d91fe64795186d27940e80426 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 26 May 2023 16:17:50 +0100 Subject: 1.84.1 --- CHANGES.md | 19 +++++++++++++++++++ changelog.d/15672.bugfix | 1 - changelog.d/15673.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 26 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/15672.bugfix delete mode 100644 changelog.d/15673.misc diff --git a/CHANGES.md b/CHANGES.md index e9397158f1..1fe1d013c6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,22 @@ +Synapse 1.84.1 (2023-05-26) +=========================== + +This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers. +If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. 
+ + Bugfixes + -------- + + - Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672)) + + + Internal Changes + ---------------- + + - Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673)) + + Synapse 1.84.0 (2023-05-23) =========================== diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix deleted file mode 100644 index c81d7332b7..0000000000 --- a/changelog.d/15672.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. \ No newline at end of file diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc deleted file mode 100644 index 52148fc63f..0000000000 --- a/changelog.d/15673.misc +++ /dev/null @@ -1 +0,0 @@ -Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 51935e03b6..fbdc9c177e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.84.1) stable; urgency=medium + + * New Synapse release 1.84.1. + + -- Synapse Packaging team Fri, 26 May 2023 16:15:30 +0100 + matrix-synapse-py3 (1.84.0) stable; urgency=medium * New Synapse release 1.84.0. diff --git a/pyproject.toml b/pyproject.toml index 9c77f9294a..6e9bce65b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.0" +version = "1.84.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From cb6f4a84a6a8f2b79b80851f37eb5fa4c7c5264a Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 26 May 2023 16:18:35 +0100 Subject: Fix a typographical error in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 1fe1d013c6..85c9af8ce4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.84.1 (2023-05-26) =========================== -This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers. +This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers. If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. -- cgit 1.5.1 From 2ad91ec628126753590c1a90c432270d6c8fa8fd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 26 May 2023 13:16:08 -0400 Subject: Set thread_id column to non-null for event_push_{actions,actions_staging,summary} (#15597) Updates the database schema to require a thread_id (by adding a constraint that the column is non-null) for event_push_actions, event_push_actions_staging, and event_push_summary. For PostgreSQL we add the constraint as NOT VALID, then VALIDATE the constraint in a background job to avoid locking the table during an upgrade. Each table is updated as a separate schema delta to avoid deadlocks between them. For SQLite we simply rebuild the table & copy the data.
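A brief hedged sketch of the PostgreSQL pattern described above, using the `event_push_actions` table as the example; the constraint name matches the one registered in the patch, though the exact delta contents may differ:

```sql
-- Cheap under the upgrade lock: NOT VALID skips the full-table scan, so only
-- newly written rows are checked immediately.
ALTER TABLE event_push_actions
    ADD CONSTRAINT event_push_actions_thread_id
    CHECK (thread_id IS NOT NULL) NOT VALID;

-- Later, from a background update: VALIDATE scans the table but holds only a
-- SHARE UPDATE EXCLUSIVE lock, so normal writes are not blocked.
ALTER TABLE event_push_actions VALIDATE CONSTRAINT event_push_actions_thread_id;
```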
--- changelog.d/15597.misc | 1 + synapse/storage/background_updates.py | 44 ++++ .../storage/databases/main/event_push_actions.py | 254 +++------------------ synapse/storage/schema/__init__.py | 3 + .../delta/77/05thread_notifications_backfill.sql | 28 +++ .../77/06thread_notifications_not_null.sql.sqlite | 102 +++++++++ ...ations_not_null_event_push_actions.sql.postgres | 27 +++ ...ot_null_event_push_actions_staging.sql.postgres | 27 +++ ...ations_not_null_event_push_summary.sql.postgres | 29 +++ 9 files changed, 292 insertions(+), 223 deletions(-) create mode 100644 changelog.d/15597.misc create mode 100644 synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc new file mode 100644 index 0000000000..2dea23784f --- /dev/null +++ b/changelog.d/15597.misc @@ -0,0 +1 @@ +Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a99aea8926..ca085ef800 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -561,6 +561,50 @@ class BackgroundUpdater: updater, oneshot=True ) + def register_background_validate_constraint( + self, update_name: str, constraint_name: str, table: str + ) -> None: + """Helper for store classes to do a background validate constraint. + + This only applies on PostgreSQL. + + To use: + + 1. use a schema delta file to add a background update. Example: + INSERT INTO background_updates (update_name, progress_json) VALUES + ('validate_my_constraint', '{}'); + + 2. 
In the Store constructor, call this method + + Args: + update_name: update_name to register for + constraint_name: name of constraint to validate + table: table the constraint is applied to + """ + + def runner(conn: Connection) -> None: + c = conn.cursor() + + sql = f""" + ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}; + """ + logger.debug("[SQL] %s", sql) + c.execute(sql) + + async def updater(progress: JsonDict, batch_size: int) -> int: + assert isinstance( + self.db_pool.engine, engines.PostgresEngine + ), "validate constraint background update registered for non-Postres database" + + logger.info("Validating constraint %s to %s", constraint_name, table) + await self.db_pool.runWithConnection(runner) + await self._end_background_update(update_name) + return 1 + + self._background_update_handlers[update_name] = _BackgroundUpdateHandler( + updater, oneshot=True + ) + async def create_index_in_background( self, index_name: str, diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 6fdb1e292e..07bda7d6be 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -289,179 +289,52 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas unique=True, ) - self.db_pool.updates.register_background_update_handler( - "event_push_backfill_thread_id", - self._background_backfill_thread_id, + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_staging_thread_id", + constraint_name="event_push_actions_staging_thread_id", + table="event_push_actions_staging", ) - - # Indexes which will be used to quickly make the thread_id column non-null. - self.db_pool.updates.register_background_index_update( - "event_push_actions_thread_id_null", - index_name="event_push_actions_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_thread_id", + constraint_name="event_push_actions_thread_id", table="event_push_actions", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - self.db_pool.updates.register_background_index_update( - "event_push_summary_thread_id_null", - index_name="event_push_summary_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_summary_thread_id", + constraint_name="event_push_summary_thread_id", table="event_push_summary", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - # Check ASAP (and then later, every 1s) to see if we have finished - # background updates the event_push_actions and event_push_summary tables. - self._clock.call_later(0.0, self._check_event_push_backfill_thread_id) - self._event_push_backfill_thread_id_done = False - - @wrap_as_background_process("check_event_push_backfill_thread_id") - async def _check_event_push_backfill_thread_id(self) -> None: - """ - Has thread_id finished backfilling? - - If not, we need to just-in-time update it so the queries work. - """ - done = await self.db_pool.updates.has_completed_background_update( - "event_push_backfill_thread_id" + self.db_pool.updates.register_background_update_handler( + "event_push_drop_null_thread_id_indexes", + self._background_drop_null_thread_id_indexes, ) - if done: - self._event_push_backfill_thread_id_done = True - else: - # Reschedule to run. 
- self._clock.call_later(15.0, self._check_event_push_backfill_thread_id) - - async def _background_backfill_thread_id( + async def _background_drop_null_thread_id_indexes( self, progress: JsonDict, batch_size: int ) -> int: """ - Fill in the thread_id field for event_push_actions and event_push_summary. - - This is preparatory so that it can be made non-nullable in the future. - - Because all current (null) data is done in an unthreaded manner this - simply assumes it is on the "main" timeline. Since event_push_actions - are periodically cleared it is not possible to correctly re-calculate - the thread_id. + Drop the indexes used to find null thread_ids for event_push_actions and + event_push_summary. """ - event_push_actions_done = progress.get("event_push_actions_done", False) - def add_thread_id_txn( - txn: LoggingTransaction, start_stream_ordering: int - ) -> int: - sql = """ - SELECT stream_ordering - FROM event_push_actions - WHERE - thread_id IS NULL - AND stream_ordering > ? - ORDER BY stream_ordering - LIMIT ? - """ - txn.execute(sql, (start_stream_ordering, batch_size)) - - # No more rows to process. - rows = txn.fetchall() - if not rows: - progress["event_push_actions_done"] = True - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - return 0 + def drop_null_thread_id_indexes_txn(txn: LoggingTransaction) -> None: + sql = "DROP INDEX IF EXISTS event_push_actions_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - # Update the thread ID for any of those rows. - max_stream_ordering = rows[-1][0] + sql = "DROP INDEX IF EXISTS event_push_summary_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - sql = """ - UPDATE event_push_actions - SET thread_id = 'main' - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """ - txn.execute( - sql, - ( - start_stream_ordering, - max_stream_ordering, - ), - ) - - # Update progress. - processed_rows = txn.rowcount - progress["max_event_push_actions_stream_ordering"] = max_stream_ordering - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - def add_thread_id_summary_txn(txn: LoggingTransaction) -> int: - min_user_id = progress.get("max_summary_user_id", "") - min_room_id = progress.get("max_summary_room_id", "") - - # Slightly overcomplicated query for getting the Nth user ID / room - # ID tuple, or the last if there are less than N remaining. - sql = """ - SELECT user_id, room_id FROM ( - SELECT user_id, room_id FROM event_push_summary - WHERE (user_id, room_id) > (?, ?) - AND thread_id IS NULL - ORDER BY user_id, room_id - LIMIT ? - ) AS e - ORDER BY user_id DESC, room_id DESC - LIMIT 1 - """ - - txn.execute(sql, (min_user_id, min_room_id, batch_size)) - row = txn.fetchone() - if not row: - return 0 - - max_user_id, max_room_id = row - - sql = """ - UPDATE event_push_summary - SET thread_id = 'main' - WHERE - (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?) - AND thread_id IS NULL - """ - txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id)) - processed_rows = txn.rowcount - - progress["max_summary_user_id"] = max_user_id - progress["max_summary_room_id"] = max_room_id - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - # First update the event_push_actions table, then the event_push_summary table. 
- # - # Note that the event_push_actions_staging table is ignored since it is - # assumed that items in that table will only exist for a short period of - # time. - if not event_push_actions_done: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_txn, - progress.get("max_event_push_actions_stream_ordering", 0), - ) - else: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_summary_txn, - ) - - # Only done after the event_push_summary table is done. - if not result: - await self.db_pool.updates._end_background_update( - "event_push_backfill_thread_id" - ) - - return result + await self.db_pool.runInteraction( + "drop_null_thread_id_indexes_txn", + drop_null_thread_id_indexes_txn, + ) + await self.db_pool.updates._end_background_update( + "event_push_drop_null_thread_id_indexes" + ) + return 0 async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: """Get the notification count by room for a user. Only considers notifications, @@ -711,25 +584,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # First we pull the counts from the summary table. # # We check that `last_receipt_stream_ordering` matches the stream ordering of the @@ -1545,25 +1399,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (room_id, user_id, stream_ordering, *thread_args), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # Fetch the notification counts between the stream ordering of the # latest receipt and what was previously summarised. unread_counts = self._get_notif_unread_count_for_user_room( @@ -1698,19 +1533,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas rotate_to_stream_ordering: The new maximum event stream ordering to summarise. """ - # Ensure that any new actions have an updated thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """, - (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # XXX Do we need to update summaries here too? 
-
         # Calculate the new counts that should be upserted into event_push_summary
         sql = """
             SELECT user_id, room_id, thread_id,
@@ -1773,20 +1595,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas

         logger.info("Rotating notifications, handling %d rows", len(summaries))

-        # Ensure that any updated threads have the proper thread_id.
-        if not self._event_push_backfill_thread_id_done:
-            txn.execute_batch(
-                """
-                UPDATE event_push_summary
-                SET thread_id = ?
-                WHERE room_id = ? AND user_id = ? AND thread_id is NULL
-                """,
-                [
-                    (MAIN_TIMELINE, room_id, user_id)
-                    for user_id, room_id, _ in summaries
-                ],
-            )
-
         self.db_pool.simple_upsert_many_txn(
             txn,
             table="event_push_summary",
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index df2cc31ca6..5cc786f030 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -110,6 +110,9 @@ SCHEMA_COMPAT_VERSION = (
     # Queries against `event_stream_ordering` columns in membership tables must
     # be disambiguated.
     #
+    # The thread_id column must be written to with non-null values for the
+    # event_push_actions, event_push_actions_staging, and event_push_summary tables.
+    #
     # insertions to the column `full_user_id` of tables profiles and user_filters can no
     # longer be null
     76
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
new file mode 100644
index 0000000000..ce6f9ff937
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -0,0 +1,28 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Force the background updates from 06thread_notifications.sql to run in the
+-- foreground as code will now require those to be "done".
+
+DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id';
+
+-- Overwrite any null thread_id values.
+UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL;
+
+-- Drop the background updates to calculate the indexes used to find null thread_ids.
+DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null';
+DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null';
diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite
new file mode 100644
index 0000000000..d19b9648b5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite
@@ -0,0 +1,102 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable.
+--
+-- SQLite doesn't support modifying the columns of an existing table, so the
+-- tables must be recreated.
+
+-- Create the new tables.
+CREATE TABLE event_push_actions_staging_new (
+    event_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    actions TEXT NOT NULL,
+    notif SMALLINT NOT NULL,
+    highlight SMALLINT NOT NULL,
+    unread SMALLINT,
+    thread_id TEXT,
+    inserted_ts BIGINT,
+    CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+CREATE TABLE event_push_actions_new (
+    room_id TEXT NOT NULL,
+    event_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    profile_tag VARCHAR(32),
+    actions TEXT NOT NULL,
+    topological_ordering BIGINT,
+    stream_ordering BIGINT,
+    notif SMALLINT,
+    highlight SMALLINT,
+    unread SMALLINT,
+    thread_id TEXT,
+    CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag),
+    CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+CREATE TABLE event_push_summary_new (
+    user_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    notif_count BIGINT NOT NULL,
+    stream_ordering BIGINT NOT NULL,
+    unread_count BIGINT,
+    last_receipt_stream_ordering BIGINT,
+    thread_id TEXT,
+    CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+-- Copy the data.
+INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts)
+    SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts
+    FROM event_push_actions_staging;
+
+INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id)
+    SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id
+    FROM event_push_actions;
+
+INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id)
+    SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id
+    FROM event_push_summary;
+
+-- Drop the old tables.
+DROP TABLE event_push_actions_staging;
+DROP TABLE event_push_actions;
+DROP TABLE event_push_summary;
+
+-- Rename the tables.
+ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging; +ALTER TABLE event_push_actions_new RENAME TO event_push_actions; +ALTER TABLE event_push_summary_new RENAME TO event_push_summary; + +-- Recreate the indexes. +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); + +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); + +CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ; + +-- Recreate some indexes in the background, by re-running the background updates +-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_summary_unique_index2', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_stream_highlight_index', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres new file mode 100644 index 0000000000..381184b5e2 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions + ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
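+--
+-- For illustration only (the table and column names below are hypothetical,
+-- not part of this migration), the full two-phase pattern being used is:
+--
+--     ALTER TABLE some_table
+--         ADD CONSTRAINT some_table_col_not_null CHECK (col IS NOT NULL) NOT VALID;
+--     ALTER TABLE some_table VALIDATE CONSTRAINT some_table_col_not_null;
+--
+-- The INSERT below schedules that deferred VALIDATE step as a background
+-- update rather than running it here.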
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7706, 'event_push_actions_thread_id', '{}', 'event_push_actions_staging_thread_id'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres new file mode 100644 index 0000000000..395f9c7260 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions_staging + ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_staging_thread_id', '{}'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres new file mode 100644 index 0000000000..140ceff1fa --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres @@ -0,0 +1,29 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_summary + ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7706, 'event_push_summary_thread_id', '{}', 'event_push_actions_thread_id'), + -- Also clean-up the old indexes. + (7706, 'event_push_drop_null_thread_id_indexes', '{}', 'event_push_summary_thread_id'); -- cgit 1.5.1 From 179f0f851e456c8dda3c7092bcb72bd2ec5e65cc Mon Sep 17 00:00:00 2001 From: Grant McLean Date: Sat, 27 May 2023 05:28:04 +1200 Subject: Documentation improvements to contributing guide (#15667) (#15668) Fix #15667 - Reiterate the importance of getting Rust installed and set up before attempting to install the Python dependencies. - Mention the importance of confirming that `poetry install` completed successfully and include a typical error that the user might see if it did not. - Expand on "Now edit homeserver.yaml" to give examples of things likely to need changing and to link to the relevant sections of the Synapse server documentation. --- changelog.d/15668.doc | 1 + docs/development/contributing_guide.md | 33 ++++++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15668.doc diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc new file mode 100644 index 0000000000..3526a4d50c --- /dev/null +++ b/changelog.d/15668.doc @@ -0,0 +1 @@ +Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 56cf4ba81e..f5ba55afb7 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -22,6 +22,9 @@ on Windows is not officially supported. The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough. +A recent version of the Rust compiler is needed to build the native modules. The +easiest way of installing the latest version is to use [rustup](https://rustup.rs/). + Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`. Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`. @@ -30,9 +33,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/). -A recent version of the Rust compiler is needed to build the native modules. The -easiest way of installing the latest version is to use [rustup](https://rustup.rs/). - # 3. Get the source. @@ -53,6 +53,11 @@ can find many good git tutorials on the web. # 4. Install the dependencies + +Before installing the Python dependencies, make sure you have installed a recent version +of Rust (see the "What do I need?" section above). The easiest way of installing the +latest version is to use [rustup](https://rustup.rs/). 
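+
+To double-check that the toolchain is available before continuing, both of the
+following commands should print a version string (exact output varies by
+platform and toolchain version):
+
+```sh
+# Both binaries are provided by a rustup-managed toolchain.
+rustc --version
+cargo --version
+```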
+
 Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
 and development environment. Once you have installed Python 3 and added the
 source, you should install `poetry`.
@@ -76,7 +81,8 @@ cd path/where/you/have/cloned/the/repository
 poetry install --extras all
 ```

-This will install the runtime and developer dependencies for the project.
+This will install the runtime and developer dependencies for the project. Be sure to check
+that the `poetry install` step completed cleanly.

 ## Running Synapse via poetry

@@ -84,14 +90,31 @@ To start a local instance of Synapse in the locked poetry environment, create a

 ```sh
 cp docs/sample_config.yaml homeserver.yaml
+cp docs/sample_log_config.yaml log_config.yaml
 ```

-Now edit homeserver.yaml, and run Synapse with:
+Now edit `homeserver.yaml`. Things you might want to change include:
+
+- Setting a `server_name`
+- Adjusting paths so they are correct for your system, e.g. pointing `log_config` at the log config you just copied
+- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
+- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use the [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).
+
+And then run Synapse with the following command:

 ```sh
 poetry run python -m synapse.app.homeserver -c homeserver.yaml
 ```

+If you get an error like the following:
+
+```
+importlib.metadata.PackageNotFoundError: matrix-synapse
+```
+
+this probably indicates that the `poetry install` step did not complete cleanly - go back and
+resolve any issues and re-run until successful.
+
 # 5. Get in touch.

 Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
-- 
cgit 1.5.1 

From 50918c494057dc93bfa6e37f7d140d68711846d1 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Fri, 26 May 2023 12:05:24 -0600
Subject: Add `MSC3820opt2` as a known room version (#15678)

---
 changelog.d/15678.misc       | 1 +
 synapse/api/room_versions.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/15678.misc

diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc
new file mode 100644
index 0000000000..92eae49952
--- /dev/null
+++ b/changelog.d/15678.misc
@@ -0,0 +1 @@
+Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
\ No newline at end of file
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 035a14171b..c5c71e242f 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -528,6 +528,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V10,
         RoomVersions.MSC2716v4,
         RoomVersions.MSC3989,
+        RoomVersions.MSC3820opt2,
     )
 }
-- 
cgit 1.5.1 

From c835befd10ae0087c3c54a36989ba347313b68af Mon Sep 17 00:00:00 2001
From: Jason Little
Date: Fri, 26 May 2023 14:28:39 -0500
Subject: Add Unix socket support for Redis connections (#15644)

Adds a new configuration setting to connect to Redis via a Unix socket instead
of over TCP. Disabled by default.
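Based on the documentation change below, a minimal opt-in configuration might
look like the following (the socket path here is illustrative; once `path` is
set, `host` and `port` are ignored):

```yaml
redis:
  enabled: true
  path: /run/redis/redis.sock
```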
---
 changelog.d/15644.feature                        |  1 +
 docs/usage/configuration/config_documentation.md |  4 ++
 stubs/txredisapi.pyi                             |  3 ++
 synapse/config/redis.py                          |  1 +
 synapse/replication/tcp/handler.py               | 10 +++-
 synapse/replication/tcp/redis.py                 | 62 +++++++++++++++++++++---
 synapse/server.py                                | 42 ++++++++++------
 7 files changed, 100 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/15644.feature

diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature
new file mode 100644
index 0000000000..1b6126af53
--- /dev/null
+++ b/changelog.d/15644.feature
@@ -0,0 +1 @@
+Add Unix socket support for Redis connections. Contributed by Jason Little.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 93b132b6e4..5ede6d0a82 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3979,6 +3979,8 @@ This setting has the following sub-options:
 * `enabled`: whether to use Redis support. Defaults to false.
 * `host` and `port`: Optional host and port to use to connect to redis. Defaults to
   localhost and 6379
+* `path`: The full path to a local Unix socket file. **If this is used, `host` and
+  `port` are ignored.** Defaults to `/tmp/redis.sock`.
 * `password`: Optional password if configured on the Redis instance.
 * `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
 * `use_tls`: Whether to use tls connection. Defaults to false.
@@ -3991,6 +3993,8 @@ This setting has the following sub-options:

   _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_

+  _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
+
 Example configuration:
 ```yaml
 redis:
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 695a2307c2..b7bd59d2ea 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -61,6 +61,9 @@ def lazyConnection(
 # most methods to it via ConnectionHandler.__getattr__.
 class ConnectionHandler(RedisProtocol):
     def disconnect(self) -> "Deferred[None]": ...
+    def __repr__(self) -> str: ...
+
+class UnixConnectionHandler(ConnectionHandler): ...
class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool diff --git a/synapse/config/redis.py b/synapse/config/redis.py index 636cb450b8..3c4c499e22 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -33,6 +33,7 @@ class RedisConfig(Config): self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) + self.redis_path = redis_config.get("path", None) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 233ad61d49..5d108fe11b 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -352,7 +352,15 @@ class ReplicationCommandHandler: reactor = hs.get_reactor() redis_config = hs.config.redis - if hs.config.redis.redis_use_tls: + if redis_config.redis_path is not None: + reactor.connectUNIX( + redis_config.redis_path, + self._factory, + timeout=30, + checkPID=False, + ) + + elif hs.config.redis.redis_use_tls: ssl_context_factory = ClientContextFactory(hs.config.redis) reactor.connectSSL( redis_config.redis_host, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index c8f4bf8b27..7e96145b3b 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -17,7 +17,12 @@ from inspect import isawaitable from typing import TYPE_CHECKING, Any, Generic, List, Optional, Type, TypeVar, cast import attr -import txredisapi +from txredisapi import ( + ConnectionHandler, + RedisFactory, + SubscriberProtocol, + UnixConnectionHandler, +) from zope.interface import implementer from twisted.internet.address import IPv4Address, IPv6Address @@ -68,7 +73,7 @@ class ConstantProperty(Generic[T, V]): @implementer(IReplicationConnection) -class RedisSubscriber(txredisapi.SubscriberProtocol): +class RedisSubscriber(SubscriberProtocol): """Connection to redis subscribed to replication stream. This class fulfils two functions: @@ -95,7 +100,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_handler: "ReplicationCommandHandler" synapse_stream_prefix: str synapse_channel_names: List[str] - synapse_outbound_redis_connection: txredisapi.ConnectionHandler + synapse_outbound_redis_connection: ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) @@ -229,7 +234,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): ) -class SynapseRedisFactory(txredisapi.RedisFactory): +class SynapseRedisFactory(RedisFactory): """A subclass of RedisFactory that periodically sends pings to ensure that we detect dead connections. """ @@ -245,7 +250,7 @@ class SynapseRedisFactory(txredisapi.RedisFactory): dbid: Optional[int], poolsize: int, isLazy: bool = False, - handler: Type = txredisapi.ConnectionHandler, + handler: Type = ConnectionHandler, charset: str = "utf-8", password: Optional[str] = None, replyTimeout: int = 30, @@ -326,7 +331,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): def __init__( self, hs: "HomeServer", - outbound_redis_connection: txredisapi.ConnectionHandler, + outbound_redis_connection: ConnectionHandler, channel_names: List[str], ): super().__init__( @@ -368,7 +373,7 @@ def lazyConnection( reconnect: bool = True, password: Optional[str] = None, replyTimeout: int = 30, -) -> txredisapi.ConnectionHandler: +) -> ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the connections is lost. 
""" @@ -380,7 +385,7 @@ def lazyConnection( dbid=dbid, poolsize=1, isLazy=True, - handler=txredisapi.ConnectionHandler, + handler=ConnectionHandler, password=password, replyTimeout=replyTimeout, ) @@ -408,3 +413,44 @@ def lazyConnection( ) return factory.handler + + +def lazyUnixConnection( + hs: "HomeServer", + path: str = "/tmp/redis.sock", + dbid: Optional[int] = None, + reconnect: bool = True, + password: Optional[str] = None, + replyTimeout: int = 30, +) -> ConnectionHandler: + """Creates a connection to Redis that is lazily set up and reconnects if the + connection is lost. + + Returns: + A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case. + """ + + uuid = path + + factory = SynapseRedisFactory( + hs, + uuid=uuid, + dbid=dbid, + poolsize=1, + isLazy=True, + handler=UnixConnectionHandler, + password=password, + replyTimeout=replyTimeout, + ) + factory.continueTrying = reconnect + + reactor = hs.get_reactor() + + reactor.connectUNIX( + path, + factory, + timeout=30, + checkPID=False, + ) + + return factory.handler diff --git a/synapse/server.py b/synapse/server.py index f6e245569c..cce5fb66ff 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -864,22 +864,36 @@ class HomeServer(metaclass=abc.ABCMeta): # We only want to import redis module if we're using it, as we have # `txredisapi` as an optional dependency. - from synapse.replication.tcp.redis import lazyConnection + from synapse.replication.tcp.redis import lazyConnection, lazyUnixConnection - logger.info( - "Connecting to redis (host=%r port=%r) for external cache", - self.config.redis.redis_host, - self.config.redis.redis_port, - ) + if self.config.redis.redis_path is None: + logger.info( + "Connecting to redis (host=%r port=%r) for external cache", + self.config.redis.redis_host, + self.config.redis.redis_port, + ) - return lazyConnection( - hs=self, - host=self.config.redis.redis_host, - port=self.config.redis.redis_port, - dbid=self.config.redis.redis_dbid, - password=self.config.redis.redis_password, - reconnect=True, - ) + return lazyConnection( + hs=self, + host=self.config.redis.redis_host, + port=self.config.redis.redis_port, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) + else: + logger.info( + "Connecting to redis (path=%r) for external cache", + self.config.redis.redis_path, + ) + + return lazyUnixConnection( + hs=self, + path=self.config.redis.redis_path, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) def should_send_federation(self) -> bool: "Should this server be sending federation traffic directly?" -- cgit 1.5.1 From 4f07c2a170aceb8f0ede67f654805d55301b422e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:07:25 -0400 Subject: Bump types-pyyaml from 6.0.12.9 to 6.0.12.10 (#15683) --- changelog.d/15683.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15683.misc diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc new file mode 100644 index 0000000000..147f13b99c --- /dev/null +++ b/changelog.d/15683.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. 
diff --git a/poetry.lock b/poetry.lock index 3f8bf7c304..83ea43b59a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3097,14 +3097,14 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.9" +version = "6.0.12.10" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, - {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, + {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"}, + {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"}, ] [[package]] -- cgit 1.5.1 From ea634a9f811fe768efec51edab5b9a9af6ef53e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:13:40 -0400 Subject: Bump prometheus-client from 0.16.0 to 0.17.0 (#15682) --- changelog.d/15682.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15682.misc diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc new file mode 100644 index 0000000000..687af7d8d7 --- /dev/null +++ b/changelog.d/15682.misc @@ -0,0 +1 @@ +Bump prometheus-client from 0.16.0 to 0.17.0. diff --git a/poetry.lock b/poetry.lock index 83ea43b59a..ecf704ea93 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1781,14 +1781,14 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.17.0" description = "Python client for the Prometheus monitoring system." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"}, - {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"}, + {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"}, + {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"}, ] [package.extras] -- cgit 1.5.1 From eb48b10f4fa28ee9839a2b42418889b47c7c36bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:14:58 -0400 Subject: Bump pydantic from 1.10.7 to 1.10.8 (#15685) --- changelog.d/15685.misc | 1 + poetry.lock | 74 +++++++++++++++++++++++++------------------------- 2 files changed, 38 insertions(+), 37 deletions(-) create mode 100644 changelog.d/15685.misc diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc new file mode 100644 index 0000000000..7d4cf65bf3 --- /dev/null +++ b/changelog.d/15685.misc @@ -0,0 +1 @@ +Bump pydantic from 1.10.7 to 1.10.8. 
diff --git a/poetry.lock b/poetry.lock index ecf704ea93..60f09219fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1887,48 +1887,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.7" +version = "1.10.8" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"}, - {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"}, - {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"}, - {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"}, - {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"}, - {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"}, - {file = 
"pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"}, - {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"}, - {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"}, - {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"}, - {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"}, - {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, + {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, + {file = 
"pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, + {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, + {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, + {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, + {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, + {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, + {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, + {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, + {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, ] [package.dependencies] -- cgit 1.5.1 From 04798b710dc2cc8cf5a8cfb8a454f03cbfa8840c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:15:49 -0400 Subject: Bump log from 0.4.17 to 0.4.18 (#15681) --- Cargo.lock | 7 ++----- changelog.d/15681.misc | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15681.misc diff --git a/Cargo.lock b/Cargo.lock index e169a665b6..08331385c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,12 +132,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "memchr" diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc new file mode 100644 index 0000000000..2de551dd63 --- /dev/null +++ b/changelog.d/15681.misc @@ -0,0 +1 @@ +Bump log from 0.4.17 to 0.4.18. -- cgit 1.5.1 From 2b6c9150dca9fa1884c0f2e27d5ee268be243c2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:03:58 +0100 Subject: Bump types-requests from 2.30.0.0 to 2.31.0.0 (#15684) * Bump types-requests from 2.30.0.0 to 2.31.0.0 Bumps [types-requests](https://github.com/python/typeshed) from 2.30.0.0 to 2.31.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15684.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15684.misc diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc new file mode 100644 index 0000000000..4c2edf87fd --- /dev/null +++ b/changelog.d/15684.misc @@ -0,0 +1 @@ +Bump types-requests from 2.30.0.0 to 2.31.0.0. 
diff --git a/poetry.lock b/poetry.lock index 60f09219fe..4057ef04e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3109,14 +3109,14 @@ files = [ [[package]] name = "types-requests" -version = "2.30.0.0" +version = "2.31.0.0" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.30.0.0.tar.gz", hash = "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"}, - {file = "types_requests-2.30.0.0-py3-none-any.whl", hash = "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864"}, + {file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"}, + {file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"}, ] [package.dependencies] -- cgit 1.5.1 From 626bd75f4847f36747c162348e309b65cc1646b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:13:04 +0100 Subject: Bump types-bleach from 6.0.0.1 to 6.0.0.3 (#15686) * Bump types-bleach from 6.0.0.1 to 6.0.0.3 Bumps [types-bleach](https://github.com/python/typeshed) from 6.0.0.1 to 6.0.0.3. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-bleach dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions Co-authored-by: Patrick Cloke Co-authored-by: David Robertson --- changelog.d/15686.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15686.misc diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc new file mode 100644 index 0000000000..feacbf35d6 --- /dev/null +++ b/changelog.d/15686.misc @@ -0,0 +1 @@ +Bump types-bleach from 6.0.0.1 to 6.0.0.3. 
diff --git a/poetry.lock b/poetry.lock index 4057ef04e3..0879e64cf1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2998,14 +2998,14 @@ files = [ [[package]] name = "types-bleach" -version = "6.0.0.1" +version = "6.0.0.3" description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-bleach-6.0.0.1.tar.gz", hash = "sha256:43d9129deb9e82918747437edf78f09ff440f2973f4702625b61994f3e698518"}, - {file = "types_bleach-6.0.0.1-py3-none-any.whl", hash = "sha256:440df967254007be80bb0f4d851f026c29c709cc48359bf4935d2b2f3a6f9f90"}, + {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"}, + {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"}, ] [[package]] -- cgit 1.5.1 From 42786d8a477b6d44075b0e56949820331d9962d8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 13:54:50 +0100 Subject: Create dependabot changelogs at release time (#15481) * Ditch dependabot changelog workflow * Summarise dependabot commits in release script * Changelog * Update scripts-dev/release.py --- .github/workflows/dependabot_changelog.yml | 49 ---------------------------- changelog.d/15481.misc | 1 + docs/development/dependencies.md | 12 ++++--- scripts-dev/release.py | 52 ++++++++++++++++++++++++++++-- 4 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 .github/workflows/dependabot_changelog.yml create mode 100644 changelog.d/15481.misc diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml deleted file mode 100644 index df47e3dcba..0000000000 --- a/.github/workflows/dependabot_changelog.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Write changelog for dependabot PR -on: - pull_request: - types: - - opened - - reopened # For debugging! - -permissions: - # Needed to be able to push the commit. See - # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request - # for a similar example - contents: write - -jobs: - add-changelog: - runs-on: 'ubuntu-latest' - if: ${{ github.actor == 'dependabot[bot]' }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.ref }} - - name: Write, commit and push changelog - env: - PR_TITLE: ${{ github.event.pull_request.title }} - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc - git add changelog.d - git config user.email "github-actions[bot]@users.noreply.github.com" - git config user.name "GitHub Actions" - git commit -m "Changelog" - git push - shell: bash - # The `git push` above does not trigger CI on the dependabot PR. - # - # By default, workflows can't trigger other workflows when they're just using the - # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing - # recursive workflow loops by accident, because that'll get very expensive very - # quickly.) Instead, you have to manually call out to another workflow, or else - # make your changes (i.e. the `git push` above) using a personal access token. - # See - # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow - # - # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR. - # See git commit history for previous attempts. 
If anyone desperately wants to try
-    # again in the future, make a matrix-bot account and use its access token to git push.
-
-    # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
-    # are sufficiently locked down to dependabot only as above.
diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc
new file mode 100644
index 0000000000..a6e088c164
--- /dev/null
+++ b/changelog.d/15481.misc
@@ -0,0 +1 @@
+Create dependabot changelogs at release time.
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index c4449c51f7..b5926d96ff 100644
--- a/docs/development/dependencies.md
+++ b/docs/development/dependencies.md
@@ -260,15 +260,17 @@ doesn't require poetry. (It's what we use in CI too). However, you could try

 ## ...handle a Dependabot pull request?

-Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
-creates a pull request a GitHub Action will run to automatically create a changelog
-file. Ensure that:
+Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` files
+up-to-date with the latest releases of our dependencies. The changelog check is
+omitted for Dependabot PRs; the release script will include them in the
+changelog.
+
+When reviewing a Dependabot PR, ensure that:

 * the lockfile changes look reasonable;
 * the upstream changelog file (linked in the description) doesn't include any
   breaking changes;
-* continuous integration passes (due to permissions, the GitHub Actions run on
-  the changelog commit will fail, look at the initial commit of the pull request);
+* continuous integration passes.

 In particular, any updates to the type hints (usually packages which start
 with `types-`) should be safe to merge if linting passes.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index ec92a59bb8..257d1e9ebd 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -27,7 +27,7 @@ import time
 import urllib.request
 from os import path
 from tempfile import TemporaryDirectory
-from typing import Any, List, Optional
+from typing import Any, List, Match, Optional, Union

 import attr
 import click
@@ -233,7 +233,7 @@ def _prepare() -> None:
     subprocess.check_output(["poetry", "version", new_version])

     # Generate changelogs.
-    generate_and_write_changelog(current_version, new_version)
+    generate_and_write_changelog(synapse_repo, current_version, new_version)

     # Generate debian changelogs
     if parsed_new_version.pre is not None:
@@ -814,7 +814,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str:


 def generate_and_write_changelog(
-    current_version: version.Version, new_version: str
+    repo: Repo, current_version: version.Version, new_version: str
 ) -> None:
     # We do this by getting a draft so that we can edit it before writing to the
     # changelog.
@@ -827,6 +827,10 @@ def generate_and_write_changelog(
     new_changes = new_changes.replace(
         "No significant changes.", f"No significant changes since {current_version}."
     )
+    new_changes += build_dependabot_changelog(
+        repo,
+        current_version,
+    )

     # Prepend changes to changelog
     with open("CHANGES.md", "r+") as f:
@@ -841,5 +845,47 @@ def generate_and_write_changelog(
             os.remove(filename)


+def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
+    """Summarise dependabot commits between `current_version` and the current git HEAD.
+ + Returns an empty string if there have been no such commits; otherwise outputs a + third-level markdown header followed by an unordered list.""" + last_release_commit = repo.tag("v" + str(current_version)).commit + rev_spec = f"{last_release_commit.hexsha}.." + commits = list(git.objects.Commit.iter_items(repo, rev_spec)) + messages = [] + for commit in reversed(commits): + if commit.author.name == "dependabot[bot]": + message: Union[str, bytes] = commit.message + if isinstance(message, bytes): + message = message.decode("utf-8") + messages.append(message.split("\n", maxsplit=1)[0]) + + if not messages: + print(f"No dependabot commits in range {rev_spec}", file=sys.stderr) + return "" + + messages.sort() + + def replacer(match: Match[str]) -> str: + desc = match.group(1) + number = match.group(2) + return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))" + + for i, message in enumerate(messages): + messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message) + messages.insert(0, "### Updates to locked dependencies\n") + return "\n".join(messages) + + +@cli.command() +@click.argument("since") +def test_dependabot_changelog(since: str) -> None: + """Test building the dependabot changelog. + + Summarises all dependabot commits between the SINCE tag and the current git HEAD.""" + print(build_dependabot_changelog(git.Repo("."), version.Version(since))) + + if __name__ == "__main__": cli() -- cgit 1.5.1 From a103b874dddc6246b06b168992fbdb7aaeb0183f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:03:22 +0100 Subject: 1.85.0rc1 --- CHANGES.md | 75 +++++++++++++++++++++++++++++++++++++++++++++++ changelog.d/10428.removal | 1 - changelog.d/15464.bugfix | 1 - changelog.d/15481.misc | 1 - changelog.d/15537.misc | 1 - changelog.d/15578.misc | 1 - changelog.d/15585.feature | 1 - changelog.d/15597.misc | 1 - changelog.d/15599.bugfix | 1 - changelog.d/15601.bugfix | 1 - changelog.d/15602.misc | 1 - changelog.d/15604.misc | 1 - changelog.d/15606.misc | 1 - changelog.d/15607.bugfix | 1 - changelog.d/15610.misc | 1 - changelog.d/15611.feature | 1 - changelog.d/15613.doc | 1 - changelog.d/15614.bugfix | 1 - changelog.d/15615.misc | 1 - changelog.d/15620.misc | 1 - changelog.d/15621.misc | 1 - changelog.d/15624.bugfix | 1 - changelog.d/15625.misc | 1 - changelog.d/15626.misc | 1 - changelog.d/15630.misc | 1 - changelog.d/15633.misc | 1 - changelog.d/15634.bugfix | 1 - changelog.d/15636.misc | 1 - changelog.d/15639.misc | 1 - changelog.d/15640.misc | 1 - changelog.d/15641.misc | 1 - changelog.d/15642.misc | 1 - changelog.d/15643.misc | 1 - changelog.d/15644.feature | 1 - changelog.d/15646.misc | 1 - changelog.d/15647.bugfix | 1 - changelog.d/15648.doc | 1 - changelog.d/15651.misc | 1 - changelog.d/15658.misc | 1 - changelog.d/15659.misc | 1 - changelog.d/15663.misc | 1 - changelog.d/15665.misc | 1 - changelog.d/15666.misc | 1 - changelog.d/15668.doc | 1 - changelog.d/15678.misc | 1 - changelog.d/15681.misc | 1 - changelog.d/15682.misc | 1 - changelog.d/15683.misc | 1 - changelog.d/15684.misc | 1 - changelog.d/15685.misc | 1 - changelog.d/15686.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 53 files changed, 82 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/10428.removal delete mode 100644 changelog.d/15464.bugfix delete mode 100644 changelog.d/15481.misc delete mode 100644 changelog.d/15537.misc delete mode 100644 changelog.d/15578.misc delete mode 100644 changelog.d/15585.feature delete mode 100644 changelog.d/15597.misc 
delete mode 100644 changelog.d/15599.bugfix delete mode 100644 changelog.d/15601.bugfix delete mode 100644 changelog.d/15602.misc delete mode 100644 changelog.d/15604.misc delete mode 100644 changelog.d/15606.misc delete mode 100644 changelog.d/15607.bugfix delete mode 100644 changelog.d/15610.misc delete mode 100644 changelog.d/15611.feature delete mode 100644 changelog.d/15613.doc delete mode 100644 changelog.d/15614.bugfix delete mode 100644 changelog.d/15615.misc delete mode 100644 changelog.d/15620.misc delete mode 100644 changelog.d/15621.misc delete mode 100644 changelog.d/15624.bugfix delete mode 100644 changelog.d/15625.misc delete mode 100644 changelog.d/15626.misc delete mode 100644 changelog.d/15630.misc delete mode 100644 changelog.d/15633.misc delete mode 100644 changelog.d/15634.bugfix delete mode 100644 changelog.d/15636.misc delete mode 100644 changelog.d/15639.misc delete mode 100644 changelog.d/15640.misc delete mode 100644 changelog.d/15641.misc delete mode 100644 changelog.d/15642.misc delete mode 100644 changelog.d/15643.misc delete mode 100644 changelog.d/15644.feature delete mode 100644 changelog.d/15646.misc delete mode 100644 changelog.d/15647.bugfix delete mode 100644 changelog.d/15648.doc delete mode 100644 changelog.d/15651.misc delete mode 100644 changelog.d/15658.misc delete mode 100644 changelog.d/15659.misc delete mode 100644 changelog.d/15663.misc delete mode 100644 changelog.d/15665.misc delete mode 100644 changelog.d/15666.misc delete mode 100644 changelog.d/15668.doc delete mode 100644 changelog.d/15678.misc delete mode 100644 changelog.d/15681.misc delete mode 100644 changelog.d/15682.misc delete mode 100644 changelog.d/15683.misc delete mode 100644 changelog.d/15684.misc delete mode 100644 changelog.d/15685.misc delete mode 100644 changelog.d/15686.misc diff --git a/CHANGES.md b/CHANGES.md index 85c9af8ce4..ba0995aa6f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,78 @@ +Synapse 1.85.0rc1 (2023-05-30) +============================== + +Features +-------- + +- Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.75/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.75/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644)) + + +Bugfixes +-------- + +- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464)) +- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601)) +- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607)) +- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. 
([\#15614](https://github.com/matrix-org/synapse/issues/15614)) +- Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624)) +- Fix a long-standing bug where deactivated users were able to login in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634)) + + +Improved Documentation +---------------------- + +- Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613)) +- Remove outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648)) +- Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668)) + + +Deprecations and Removals +------------------------- + +- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428)) + + +Internal Changes +---------------- + +- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481)) +- Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537)) +- Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578)) +- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597)) +- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602)) +- Fix subscriptable type usage in Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604)) +- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620)) +- Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647)) +- Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633)) +- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615)) +- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) +- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) +- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626)) +- Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636)) +- Fix two memory leaks in `trial` test runs. 
([\#15630](https://github.com/matrix-org/synapse/issues/15630)) +- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646)) +- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659)) +- Add requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663)) +- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665)) +- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. ([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678)) + +### Updates to locked dependencies + +* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642)) +* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681)) +* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682)) +* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685)) +* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643)) +* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651)) +* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641)) +* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686)) +* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640)) +* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683)) +* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684)) +* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639)) + Synapse 1.84.1 (2023-05-26) =========================== diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal deleted file mode 100644 index c056e89585..0000000000 --- a/changelog.d/10428.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/changelog.d/15464.bugfix b/changelog.d/15464.bugfix deleted file mode 100644 index 3c655989b3..0000000000 --- a/changelog.d/15464.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc deleted file mode 100644 index a6e088c164..0000000000 --- a/changelog.d/15481.misc +++ /dev/null @@ -1 +0,0 @@ -Create dependabot changelogs at release time. diff --git a/changelog.d/15537.misc b/changelog.d/15537.misc deleted file mode 100644 index 979e0ba977..0000000000 --- a/changelog.d/15537.misc +++ /dev/null @@ -1 +0,0 @@ -Add not null constraint to column full_user_id of tables profiles and user_filters. 
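The `build_dependabot_changelog` helper added to `scripts-dev/release.py` above works in two stages: walk every commit made since the last release tag, keep only those authored by `dependabot[bot]`, and take the first line of each commit message as its title. The following standalone sketch illustrates that first stage only; it is not the release script itself, and the repository path and tag name are assumptions for illustration:

```python
# Hedged sketch of the commit-scanning stage, using GitPython as the
# release script does. "." and "v1.84.1" are illustrative values.
from typing import List

from git import Repo


def dependabot_titles(repo_path: str, since_tag: str) -> List[str]:
    repo = Repo(repo_path)
    titles = []
    # "v1.84.1.." is a git rev range: everything reachable from HEAD
    # that is not reachable from the tag.
    for commit in repo.iter_commits(f"{since_tag}.."):
        if commit.author.name != "dependabot[bot]":
            continue
        message = commit.message
        if isinstance(message, bytes):  # GitPython may hand back bytes
            message = message.decode("utf-8")
        # Keep only the commit title, e.g. "Bump serde from 1.0.162 to 1.0.163 (#15589)"
        titles.append(message.split("\n", maxsplit=1)[0])
    return sorted(titles)


if __name__ == "__main__":
    print("\n".join(dependabot_titles(".", "v1.84.1")))
```

Sorting the titles keeps the generated section stable between runs, which matters when the changelog is rebuilt at release time.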
diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc deleted file mode 100644 index a54422239b..0000000000 --- a/changelog.d/15578.misc +++ /dev/null @@ -1 +0,0 @@ -Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature deleted file mode 100644 index 1adcfb69ee..0000000000 --- a/changelog.d/15585.feature +++ /dev/null @@ -1 +0,0 @@ -Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc deleted file mode 100644 index 2dea23784f..0000000000 --- a/changelog.d/15597.misc +++ /dev/null @@ -1 +0,0 @@ -Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix deleted file mode 100644 index b58af8ad55..0000000000 --- a/changelog.d/15599.bugfix +++ /dev/null @@ -1 +0,0 @@ -Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/changelog.d/15601.bugfix b/changelog.d/15601.bugfix deleted file mode 100644 index 426db6cea3..0000000000 --- a/changelog.d/15601.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. diff --git a/changelog.d/15602.misc b/changelog.d/15602.misc deleted file mode 100644 index cdd0c039bd..0000000000 --- a/changelog.d/15602.misc +++ /dev/null @@ -1 +0,0 @@ -Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. diff --git a/changelog.d/15604.misc b/changelog.d/15604.misc deleted file mode 100644 index 92d1d600bc..0000000000 --- a/changelog.d/15604.misc +++ /dev/null @@ -1 +0,0 @@ -Fix subscriptable type usage in Python <3.9. diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc deleted file mode 100644 index 568c0d3fc5..0000000000 --- a/changelog.d/15606.misc +++ /dev/null @@ -1 +0,0 @@ -Update internal terminology. diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix deleted file mode 100644 index a2767adbe2..0000000000 --- a/changelog.d/15607.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where filters with multiple backslashes were rejected. diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc deleted file mode 100644 index 2eff30f6e3..0000000000 --- a/changelog.d/15610.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15611.feature b/changelog.d/15611.feature deleted file mode 100644 index 7cfb46fd0a..0000000000 --- a/changelog.d/15611.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new admin API to create a new device for a user. diff --git a/changelog.d/15613.doc b/changelog.d/15613.doc deleted file mode 100644 index 94733facf0..0000000000 --- a/changelog.d/15613.doc +++ /dev/null @@ -1 +0,0 @@ -Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. 
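The second stage rewrites each retained title so that the trailing `(#15641)`-style PR reference becomes a markdown link, which is exactly the form seen in the `### Updates to locked dependencies` section above. Below is a hedged, self-contained sketch of that `Match`-callback technique; the issues URL comes from the diff, while the `linkify` name is illustrative:

```python
import re
from typing import Match


def linkify(title: str) -> str:
    def replacer(match: Match[str]) -> str:
        desc, number = match.group(1), match.group(2)
        return (
            f"* {desc}. "
            f"([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))"
        )

    # Titles that do not end in "(#1234)" are passed through unchanged.
    return re.sub(r"(.*) \(#(\d+)\)$", replacer, title)


print(linkify("Bump sphinx from 6.1.3 to 6.2.1 (#15641)"))
# * Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641))
```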
diff --git a/changelog.d/15614.bugfix b/changelog.d/15614.bugfix deleted file mode 100644 index b523ae6eb1..0000000000 --- a/changelog.d/15614.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. diff --git a/changelog.d/15615.misc b/changelog.d/15615.misc deleted file mode 100644 index a39fd0a098..0000000000 --- a/changelog.d/15615.misc +++ /dev/null @@ -1 +0,0 @@ -Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. diff --git a/changelog.d/15620.misc b/changelog.d/15620.misc deleted file mode 100644 index 568c0d3fc5..0000000000 --- a/changelog.d/15620.misc +++ /dev/null @@ -1 +0,0 @@ -Update internal terminology. diff --git a/changelog.d/15621.misc b/changelog.d/15621.misc deleted file mode 100644 index 5d060f4dbc..0000000000 --- a/changelog.d/15621.misc +++ /dev/null @@ -1 +0,0 @@ -Update Mutual Rooms (MSC2666) implementation to match new proposal text. \ No newline at end of file diff --git a/changelog.d/15624.bugfix b/changelog.d/15624.bugfix deleted file mode 100644 index fde515ba62..0000000000 --- a/changelog.d/15624.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). diff --git a/changelog.d/15625.misc b/changelog.d/15625.misc deleted file mode 100644 index 7ea8cc9433..0000000000 --- a/changelog.d/15625.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). diff --git a/changelog.d/15626.misc b/changelog.d/15626.misc deleted file mode 100644 index 0016cdbf10..0000000000 --- a/changelog.d/15626.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the olddeps CI. diff --git a/changelog.d/15630.misc b/changelog.d/15630.misc deleted file mode 100644 index a30304bfd6..0000000000 --- a/changelog.d/15630.misc +++ /dev/null @@ -1 +0,0 @@ -Fix two memory leaks in `trial` test runs. diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc deleted file mode 100644 index 4126a20602..0000000000 --- a/changelog.d/15633.misc +++ /dev/null @@ -1 +0,0 @@ -Trace how many new events from the backfill response we need to process. diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix deleted file mode 100644 index ef39e8a689..0000000000 --- a/changelog.d/15634.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where deactivated users were able to login in uncommon situations. diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc deleted file mode 100644 index 82329c5e43..0000000000 --- a/changelog.d/15636.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate timestamp from test logs (`_trial_temp/test.log`). diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc deleted file mode 100644 index 92230e206f..0000000000 --- a/changelog.d/15639.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.7.0.2 to 67.8.0.0. diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc deleted file mode 100644 index 4c2a3dbc52..0000000000 --- a/changelog.d/15640.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.5.0.2 to 9.5.0.4. diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc deleted file mode 100644 index a85d85c58e..0000000000 --- a/changelog.d/15641.misc +++ /dev/null @@ -1 +0,0 @@ -Bump sphinx from 6.1.3 to 6.2.1. 
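The `parsed_new_version.pre is not None` check in the `_prepare` hunk earlier is what distinguishes a release candidate such as 1.85.0rc1 from a final release, and the Debian packaging further below writes the same version as `1.85.0~rc1` so that dpkg sorts it before the eventual `1.85.0`. A small sketch of the `packaging.version` behaviour involved (my illustration, not code from the release script):

```python
from packaging import version

current = version.Version("1.84.1")
new = version.Version("1.85.0rc1")

assert new > current                 # the rc still sorts after the previous release
assert new.pre == ("rc", 1)          # `pre is not None`: this is a pre-release
assert new.base_version == "1.85.0"  # the final version this rc leads up to

# Debian spells the candidate 1.85.0~rc1: "~" sorts before the empty string,
# so the rc package is cleanly upgradeable to the final 1.85.0 release.
print(str(new).replace("rc", "~rc"))  # naive conversion, for illustration only
```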
diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc deleted file mode 100644 index 5d6125140d..0000000000 --- a/changelog.d/15642.misc +++ /dev/null @@ -1 +0,0 @@ -Bump furo from 2023.3.27 to 2023.5.20. diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc deleted file mode 100644 index 5bd2e74071..0000000000 --- a/changelog.d/15643.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pygithub from 1.58.1 to 1.58.2. diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature deleted file mode 100644 index 1b6126af53..0000000000 --- a/changelog.d/15644.feature +++ /dev/null @@ -1 +0,0 @@ -Add Unix socket support for Redis connections. Contributed by Jason Little. diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc deleted file mode 100644 index 872afe30b8..0000000000 --- a/changelog.d/15646.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the size of the `HomeServerConfig` cache in trial test runs. diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix deleted file mode 100644 index 2eff30f6e3..0000000000 --- a/changelog.d/15647.bugfix +++ /dev/null @@ -1 +0,0 @@ -Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc deleted file mode 100644 index 70f65ebbff..0000000000 --- a/changelog.d/15648.doc +++ /dev/null @@ -1 +0,0 @@ -Remove outdated comment from the generated and sample homeserver log configs. \ No newline at end of file diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc deleted file mode 100644 index 4d7c0248b2..0000000000 --- a/changelog.d/15651.misc +++ /dev/null @@ -1 +0,0 @@ -Bump requests from 2.28.2 to 2.31.0. diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15658.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15659.misc b/changelog.d/15659.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15659.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc deleted file mode 100644 index cc5f801543..0000000000 --- a/changelog.d/15663.misc +++ /dev/null @@ -1 +0,0 @@ -Add requesting user id parameter to key claim methods in `TransportLayerClient`. diff --git a/changelog.d/15665.misc b/changelog.d/15665.misc deleted file mode 100644 index 7ad424d8df..0000000000 --- a/changelog.d/15665.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up rebuilding of the user directory for local users. diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc deleted file mode 100644 index 92eae49952..0000000000 --- a/changelog.d/15666.misc +++ /dev/null @@ -1 +0,0 @@ -Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. \ No newline at end of file diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc deleted file mode 100644 index 3526a4d50c..0000000000 --- a/changelog.d/15668.doc +++ /dev/null @@ -1 +0,0 @@ -Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc deleted file mode 100644 index 92eae49952..0000000000 --- a/changelog.d/15678.misc +++ /dev/null @@ -1 +0,0 @@ -Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. 
\ No newline at end of file diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc deleted file mode 100644 index 2de551dd63..0000000000 --- a/changelog.d/15681.misc +++ /dev/null @@ -1 +0,0 @@ -Bump log from 0.4.17 to 0.4.18. diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc deleted file mode 100644 index 687af7d8d7..0000000000 --- a/changelog.d/15682.misc +++ /dev/null @@ -1 +0,0 @@ -Bump prometheus-client from 0.16.0 to 0.17.0. diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc deleted file mode 100644 index 147f13b99c..0000000000 --- a/changelog.d/15683.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc deleted file mode 100644 index 4c2edf87fd..0000000000 --- a/changelog.d/15684.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.30.0.0 to 2.31.0.0. diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc deleted file mode 100644 index 7d4cf65bf3..0000000000 --- a/changelog.d/15685.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pydantic from 1.10.7 to 1.10.8. diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc deleted file mode 100644 index feacbf35d6..0000000000 --- a/changelog.d/15686.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-bleach from 6.0.0.1 to 6.0.0.3. diff --git a/debian/changelog b/debian/changelog index fbdc9c177e..2d88cd9d29 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium + + * New Synapse release 1.85.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 30 May 2023 13:56:54 +0100 + matrix-synapse-py3 (1.84.1) stable; urgency=medium * New Synapse release 1.84.1. diff --git a/pyproject.toml b/pyproject.toml index 6e9bce65b6..7227bc7523 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.1" +version = "1.85.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "Apache-2.0" -- cgit 1.5.1 From cebff6f4d584683bc122686e38342dbd8699818e Mon Sep 17 00:00:00 2001 From: David Robertson <davidr@element.io> Date: Tue, 30 May 2023 14:05:44 +0100 Subject: Tweak release script dependabot wording --- scripts-dev/release.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 257d1e9ebd..89ffba8d92 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -875,6 +875,8 @@ def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> for i, message in enumerate(messages): messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message) messages.insert(0, "### Updates to locked dependencies\n") + # Add an extra blank line to the bottom of the section + messages.append("") return "\n".join(messages) -- cgit 1.5.1 From 3389653e1522c9aaea227b2afa36acd5db3ad9fe Mon Sep 17 00:00:00 2001 From: David Robertson <davidr@element.io> Date: Tue, 30 May 2023 14:18:42 +0100 Subject: Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ba0995aa6f..636c591568 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -47,8 +47,8 @@ Internal Changes - Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing.
([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647)) - Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633)) - Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615)) -- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) -- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) +- Update Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) +- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) - Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626)) - Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636)) - Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630)) -- cgit 1.5.1 From 7477810cc2be241d6f86a1d787fe469c69a84358 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:33:05 +0100 Subject: fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 636c591568..14aac9f14e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,8 +4,8 @@ Synapse 1.85.0rc1 (2023-05-30) Features -------- -- Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) -- Add a new [admin API](https://matrix-org.github.io/synapse/v1.75/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.75/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) - Add Unix socket support for Redis connections. Contributed by Jason Little. 
([\#15644](https://github.com/matrix-org/synapse/issues/15644)) -- cgit 1.5.1 From 5d8c659373ae2b169892fc9d99d54bd1b3baf65a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 30 May 2023 14:37:39 +0100 Subject: Remove unused `FederationServer.__str__` override (#15690) Signed-off-by: Sean Quah --- changelog.d/15690.misc | 1 + synapse/federation/federation_server.py | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) create mode 100644 changelog.d/15690.misc diff --git a/changelog.d/15690.misc b/changelog.d/15690.misc new file mode 100644 index 0000000000..c6c259eb7d --- /dev/null +++ b/changelog.d/15690.misc @@ -0,0 +1 @@ +Remove some unused code. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index f4ca70a698..e17cb840de 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1291,9 +1291,6 @@ class FederationServer(FederationBase): return lock = new_lock - def __str__(self) -> str: - return "<ReplicationLayer(%s)>" % self.server_name - async def exchange_third_party_invite( self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict ) -> None: -- cgit 1.5.1 From e2c8458bba5ab20f84c93a6c68e293b2d304cdc0 Mon Sep 17 00:00:00 2001 From: Quentin Gliech <quenting@element.io> Date: Fri, 17 Jun 2022 14:48:55 +0200 Subject: Make the api.auth.Auth a Protocol --- synapse/api/auth.py | 602 ---------------------------------------- synapse/api/auth/__init__.py | 175 ++++++++++++ synapse/api/auth/base.py | 273 ++++++++++++++++++ synapse/api/auth/internal.py | 369 ++++++++++++++++++++++++ synapse/server.py | 3 +- tests/api/test_auth.py | 4 +- tests/handlers/test_register.py | 4 +- tests/test_state.py | 4 +- 8 files changed, 825 insertions(+), 609 deletions(-) delete mode 100644 synapse/api/auth.py create mode 100644 synapse/api/auth/__init__.py create mode 100644 synapse/api/auth/base.py create mode 100644 synapse/api/auth/internal.py diff --git a/synapse/api/auth.py b/synapse/api/auth.py deleted file mode 100644 index 66e869bc2d..0000000000 --- a/synapse/api/auth.py +++ /dev/null @@ -1,602 +0,0 @@ -# Copyright 2014 - 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-import logging -from typing import TYPE_CHECKING, Optional, Tuple - -import pymacaroons -from netaddr import IPAddress - -from twisted.web.server import Request - -from synapse import event_auth -from synapse.api.constants import EventTypes, HistoryVisibility, Membership -from synapse.api.errors import ( - AuthError, - Codes, - InvalidClientTokenError, - MissingClientTokenError, - UnstableSpecAuthError, -) -from synapse.appservice import ApplicationService -from synapse.http import get_request_user_agent -from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import ( - active_span, - force_tracing, - start_active_span, - trace, -) -from synapse.types import Requester, create_requester -from synapse.util.cancellation import cancellable - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -# guests always get this device id. -GUEST_DEVICE_ID = "guest_device" - - -class Auth: - """ - This class contains functions for authenticating users of our client-server API. - """ - - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - self._account_validity_handler = hs.get_account_validity_handler() - self._storage_controllers = hs.get_storage_controllers() - self._macaroon_generator = hs.get_macaroon_generator() - - self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips - self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips - self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users - - async def check_user_in_room( - self, - room_id: str, - requester: Requester, - allow_departed_users: bool = False, - ) -> Tuple[str, Optional[str]]: - """Check if the user is in the room, or was at some point. - Args: - room_id: The room to check. - - requester: The user making the request, according to the access token. - - current_state: Optional map of the current state of the room. - If provided then that map is used to check whether they are a - member of the room. Otherwise the current membership is - loaded from the database. - - allow_departed_users: if True, accept users that were previously - members but have now departed. - - Raises: - AuthError if the user is/was not in the room. - Returns: - The current membership of the user in the room and the - membership event ID of the user. - """ - - user_id = requester.user.to_string() - ( - membership, - member_event_id, - ) = await self.store.get_local_current_membership_for_user_in_room( - user_id=user_id, - room_id=room_id, - ) - - if membership: - if membership == Membership.JOIN: - return membership, member_event_id - - # XXX this looks totally bogus. Why do we not allow users who have been banned, - # or those who were members previously and have been re-invited? - if allow_departed_users and membership == Membership.LEAVE: - forgot = await self.store.did_forget(user_id, room_id) - if not forgot: - return membership, member_event_id - raise UnstableSpecAuthError( - 403, - "User %s not in room %s" % (user_id, room_id), - errcode=Codes.NOT_JOINED, - ) - - @cancellable - async def get_user_by_req( - self, - request: SynapseRequest, - allow_guest: bool = False, - allow_expired: bool = False, - ) -> Requester: - """Get a registered user's ID. - - Args: - request: An HTTP request with an access_token query parameter. - allow_guest: If False, will raise an AuthError if the user making the - request is a guest. 
- allow_expired: If True, allow the request through even if the account - is expired, or session token lifetime has ended. Note that - /login will deliver access tokens regardless of expiration. - - Returns: - Resolves to the requester - Raises: - InvalidClientCredentialsError if no user by that token exists or the token - is invalid. - AuthError if access is denied for the user in the access token - """ - parent_span = active_span() - with start_active_span("get_user_by_req"): - requester = await self._wrapped_get_user_by_req( - request, allow_guest, allow_expired - ) - - if parent_span: - if requester.authenticated_entity in self._force_tracing_for_users: - # request tracing is enabled for this user, so we need to force it - # tracing on for the parent span (which will be the servlet span). - # - # It's too late for the get_user_by_req span to inherit the setting, - # so we also force it on for that. - force_tracing() - force_tracing(parent_span) - parent_span.set_tag( - "authenticated_entity", requester.authenticated_entity - ) - parent_span.set_tag("user_id", requester.user.to_string()) - if requester.device_id is not None: - parent_span.set_tag("device_id", requester.device_id) - if requester.app_service is not None: - parent_span.set_tag("appservice_id", requester.app_service.id) - return requester - - @cancellable - async def _wrapped_get_user_by_req( - self, - request: SynapseRequest, - allow_guest: bool, - allow_expired: bool, - ) -> Requester: - """Helper for get_user_by_req - - Once get_user_by_req has set up the opentracing span, this does the actual work. - """ - try: - ip_addr = request.getClientAddress().host - user_agent = get_request_user_agent(request) - - access_token = self.get_access_token_from_request(request) - - # First check if it could be a request from an appservice - requester = await self._get_appservice_user(request) - if not requester: - # If not, it should be from a regular user - requester = await self.get_user_by_access_token( - access_token, allow_expired=allow_expired - ) - - # Deny the request if the user account has expired. - # This check is only done for regular users, not appservice ones. 
- if not allow_expired: - if await self._account_validity_handler.is_user_expired( - requester.user.to_string() - ): - # Raise the error if either an account validity module has determined - # the account has expired, or the legacy account validity - # implementation is enabled and determined the account has expired - raise AuthError( - 403, - "User account has expired", - errcode=Codes.EXPIRED_ACCOUNT, - ) - - if ip_addr and ( - not requester.app_service or self._track_appservice_user_ips - ): - # XXX(quenting): I'm 95% confident that we could skip setting the - # device_id to "dummy-device" for appservices, and that the only impact - # would be some rows which whould not deduplicate in the 'user_ips' - # table during the transition - recorded_device_id = ( - "dummy-device" - if requester.device_id is None and requester.app_service is not None - else requester.device_id - ) - await self.store.insert_client_ip( - user_id=requester.authenticated_entity, - access_token=access_token, - ip=ip_addr, - user_agent=user_agent, - device_id=recorded_device_id, - ) - - # Track also the puppeted user client IP if enabled and the user is puppeting - if ( - requester.user.to_string() != requester.authenticated_entity - and self._track_puppeted_user_ips - ): - await self.store.insert_client_ip( - user_id=requester.user.to_string(), - access_token=access_token, - ip=ip_addr, - user_agent=user_agent, - device_id=requester.device_id, - ) - - if requester.is_guest and not allow_guest: - raise AuthError( - 403, - "Guest access not allowed", - errcode=Codes.GUEST_ACCESS_FORBIDDEN, - ) - - request.requester = requester - return requester - except KeyError: - raise MissingClientTokenError() - - async def validate_appservice_can_control_user_id( - self, app_service: ApplicationService, user_id: str - ) -> None: - """Validates that the app service is allowed to control - the given user. - - Args: - app_service: The app service that controls the user - user_id: The author MXID that the app service is controlling - - Raises: - AuthError: If the application service is not allowed to control the user - (user namespace regex does not match, wrong homeserver, etc) - or if the user has not been registered yet. - """ - - # It's ok if the app service is trying to use the sender from their registration - if app_service.sender == user_id: - pass - # Check to make sure the app service is allowed to control the user - elif not app_service.is_interested_in_user(user_id): - raise AuthError( - 403, - "Application service cannot masquerade as this user (%s)." % user_id, - ) - # Check to make sure the user is already registered on the homeserver - elif not (await self.store.get_user_by_id(user_id)): - raise AuthError( - 403, "Application service has not registered this user (%s)" % user_id - ) - - @cancellable - async def _get_appservice_user(self, request: Request) -> Optional[Requester]: - """ - Given a request, reads the request parameters to determine: - - whether it's an application service that's making this request - - what user the application service should be treated as controlling - (the user_id URI parameter allows an application service to masquerade - any applicable user in its namespace) - - what device the application service should be treated as controlling - (the device_id[^1] URI parameter allows an application service to masquerade - as any device that exists for the relevant user) - - [^1] Unstable and provided by MSC3202. - Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. 
- - Returns: - the application service `Requester` of that request - - Postconditions: - - The `app_service` field in the returned `Requester` is set - - The `user_id` field in the returned `Requester` is either the application - service sender or the controlled user set by the `user_id` URI parameter - - The returned application service is permitted to control the returned user ID. - - The returned device ID, if present, has been checked to be a valid device ID - for the returned user ID. - """ - DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" - - app_service = self.store.get_app_service_by_token( - self.get_access_token_from_request(request) - ) - if app_service is None: - return None - - if app_service.ip_range_whitelist: - ip_address = IPAddress(request.getClientAddress().host) - if ip_address not in app_service.ip_range_whitelist: - return None - - # This will always be set by the time Twisted calls us. - assert request.args is not None - - if b"user_id" in request.args: - effective_user_id = request.args[b"user_id"][0].decode("utf8") - await self.validate_appservice_can_control_user_id( - app_service, effective_user_id - ) - else: - effective_user_id = app_service.sender - - effective_device_id: Optional[str] = None - - if ( - self.hs.config.experimental.msc3202_device_masquerading_enabled - and DEVICE_ID_ARG_NAME in request.args - ): - effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") - # We only just set this so it can't be None! - assert effective_device_id is not None - device_opt = await self.store.get_device( - effective_user_id, effective_device_id - ) - if device_opt is None: - # For now, use 400 M_EXCLUSIVE if the device doesn't exist. - # This is an open thread of discussion on MSC3202 as of 2021-12-09. - raise AuthError( - 400, - f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", - Codes.EXCLUSIVE, - ) - - return create_requester( - effective_user_id, app_service=app_service, device_id=effective_device_id - ) - - async def get_user_by_access_token( - self, - token: str, - allow_expired: bool = False, - ) -> Requester: - """Validate access token and get user_id from it - - Args: - token: The access token to get the user by - allow_expired: If False, raises an InvalidClientTokenError - if the token is expired - - Raises: - InvalidClientTokenError if a user by that token exists, but the token is - expired - InvalidClientCredentialsError if no user by that token exists or the token - is invalid - """ - - # First look in the database to see if the access token is present - # as an opaque token. - user_info = await self.store.get_user_by_access_token(token) - if user_info: - valid_until_ms = user_info.valid_until_ms - if ( - not allow_expired - and valid_until_ms is not None - and valid_until_ms < self.clock.time_msec() - ): - # there was a valid access token, but it has expired. - # soft-logout the user. - raise InvalidClientTokenError( - msg="Access token has expired", soft_logout=True - ) - - # Mark the token as used. This is used to invalidate old refresh - # tokens after some time. 
- await self.store.mark_access_token_as_used(user_info.token_id) - - requester = create_requester( - user_id=user_info.user_id, - access_token_id=user_info.token_id, - is_guest=user_info.is_guest, - shadow_banned=user_info.shadow_banned, - device_id=user_info.device_id, - authenticated_entity=user_info.token_owner, - ) - - return requester - - # If the token isn't found in the database, then it could still be a - # macaroon for a guest, so we check that here. - try: - user_id = self._macaroon_generator.verify_guest_token(token) - - # Guest access tokens are not stored in the database (there can - # only be one access token per guest, anyway). - # - # In order to prevent guest access tokens being used as regular - # user access tokens (and hence getting around the invalidation - # process), we look up the user id and check that it is indeed - # a guest user. - # - # It would of course be much easier to store guest access - # tokens in the database as well, but that would break existing - # guest tokens. - stored_user = await self.store.get_user_by_id(user_id) - if not stored_user: - raise InvalidClientTokenError("Unknown user_id %s" % user_id) - if not stored_user["is_guest"]: - raise InvalidClientTokenError( - "Guest access token used for regular user" - ) - - return create_requester( - user_id=user_id, - is_guest=True, - # all guests get the same device id - device_id=GUEST_DEVICE_ID, - authenticated_entity=user_id, - ) - except ( - pymacaroons.exceptions.MacaroonException, - TypeError, - ValueError, - ) as e: - logger.warning( - "Invalid access token in auth: %s %s.", - type(e), - e, - ) - raise InvalidClientTokenError("Invalid access token passed.") - - async def is_server_admin(self, requester: Requester) -> bool: - """Check if the given user is a local server admin. - - Args: - requester: The user making the request, according to the access token. - - Returns: - True if the user is an admin - """ - return await self.store.is_server_admin(requester.user) - - async def check_can_change_room_list( - self, room_id: str, requester: Requester - ) -> bool: - """Determine whether the user is allowed to edit the room's entry in the - published room list. - - Args: - room_id: The room to check. - requester: The user making the request, according to the access token. - """ - - is_admin = await self.is_server_admin(requester) - if is_admin: - return True - - await self.check_user_in_room(room_id, requester) - - # We currently require the user is a "moderator" in the room. We do this - # by checking if they would (theoretically) be able to change the - # m.room.canonical_alias events - - power_level_event = ( - await self._storage_controllers.state.get_current_state_event( - room_id, EventTypes.PowerLevels, "" - ) - ) - - auth_events = {} - if power_level_event: - auth_events[(EventTypes.PowerLevels, "")] = power_level_event - - send_level = event_auth.get_send_level( - EventTypes.CanonicalAlias, "", power_level_event - ) - user_level = event_auth.get_user_power_level( - requester.user.to_string(), auth_events - ) - - return user_level >= send_level - - @staticmethod - def has_access_token(request: Request) -> bool: - """Checks if the request has an access_token. - - Returns: - False if no access_token was given, True otherwise. - """ - # This will always be set by the time Twisted calls us. 
- assert request.args is not None - - query_params = request.args.get(b"access_token") - auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") - return bool(query_params) or bool(auth_headers) - - @staticmethod - @cancellable - def get_access_token_from_request(request: Request) -> str: - """Extracts the access_token from the request. - - Args: - request: The http request. - Returns: - The access_token - Raises: - MissingClientTokenError: If there isn't a single access_token in the - request - """ - # This will always be set by the time Twisted calls us. - assert request.args is not None - - auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") - query_params = request.args.get(b"access_token") - if auth_headers: - # Try the get the access_token from a "Authorization: Bearer" - # header - if query_params is not None: - raise MissingClientTokenError( - "Mixing Authorization headers and access_token query parameters." - ) - if len(auth_headers) > 1: - raise MissingClientTokenError("Too many Authorization headers.") - parts = auth_headers[0].split(b" ") - if parts[0] == b"Bearer" and len(parts) == 2: - return parts[1].decode("ascii") - else: - raise MissingClientTokenError("Invalid Authorization header.") - else: - # Try to get the access_token from the query params. - if not query_params: - raise MissingClientTokenError() - - return query_params[0].decode("ascii") - - @trace - async def check_user_in_room_or_world_readable( - self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> Tuple[str, Optional[str]]: - """Checks that the user is or was in the room or the room is world - readable. If it isn't then an exception is raised. - - Args: - room_id: The room to check. - requester: The user making the request, according to the access token. - allow_departed_users: If True, accept users that were previously - members but have now departed. - - Returns: - Resolves to the current membership of the user in the room and the - membership event ID of the user. If the user is not in the room and - never has been, then `(Membership.JOIN, None)` is returned. - """ - - try: - # check_user_in_room will return the most recent membership - # event for the user if: - # * The user is a non-guest user, and was ever in the room - # * The user is a guest user, and has joined the room - # else it will throw. - return await self.check_user_in_room( - room_id, requester, allow_departed_users=allow_departed_users - ) - except AuthError: - visibility = await self._storage_controllers.state.get_current_state_event( - room_id, EventTypes.RoomHistoryVisibility, "" - ) - if ( - visibility - and visibility.content.get("history_visibility") - == HistoryVisibility.WORLD_READABLE - ): - return Membership.JOIN, None - raise UnstableSpecAuthError( - 403, - "User %s not in room %s, and room previews are disabled" - % (requester.user, room_id), - errcode=Codes.NOT_JOINED, - ) diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py new file mode 100644 index 0000000000..90cfe39d76 --- /dev/null +++ b/synapse/api/auth/__init__.py @@ -0,0 +1,175 @@ +# Copyright 2023 The Matrix.org Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple + +from typing_extensions import Protocol + +from twisted.web.server import Request + +from synapse.appservice import ApplicationService +from synapse.http.site import SynapseRequest +from synapse.types import Requester + +# guests always get this device id. +GUEST_DEVICE_ID = "guest_device" + + +class Auth(Protocol): + """The interface that an auth provider must implement.""" + + async def check_user_in_room( + self, + room_id: str, + requester: Requester, + allow_departed_users: bool = False, + ) -> Tuple[str, Optional[str]]: + """Check if the user is in the room, or was at some point. + Args: + room_id: The room to check. + + user_id: The user to check. + + current_state: Optional map of the current state of the room. + If provided then that map is used to check whether they are a + member of the room. Otherwise the current membership is + loaded from the database. + + allow_departed_users: if True, accept users that were previously + members but have now departed. + + Raises: + AuthError if the user is/was not in the room. + Returns: + The current membership of the user in the room and the + membership event ID of the user. + """ + + async def get_user_by_req( + self, + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + ) -> Requester: + """Get a registered user's ID. + + Args: + request: An HTTP request with an access_token query parameter. + allow_guest: If False, will raise an AuthError if the user making the + request is a guest. + allow_expired: If True, allow the request through even if the account + is expired, or session token lifetime has ended. Note that + /login will deliver access tokens regardless of expiration. + + Returns: + Resolves to the requester + Raises: + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. + AuthError if access is denied for the user in the access token + """ + + async def validate_appservice_can_control_user_id( + self, app_service: ApplicationService, user_id: str + ) -> None: + """Validates that the app service is allowed to control + the given user. + + Args: + app_service: The app service that controls the user + user_id: The author MXID that the app service is controlling + + Raises: + AuthError: If the application service is not allowed to control the user + (user namespace regex does not match, wrong homeserver, etc) + or if the user has not been registered yet. + """ + + async def get_user_by_access_token( + self, + token: str, + allow_expired: bool = False, + ) -> Requester: + """Validate access token and get user_id from it + + Args: + token: The access token to get the user by + allow_expired: If False, raises an InvalidClientTokenError + if the token is expired + + Raises: + InvalidClientTokenError if a user by that token exists, but the token is + expired + InvalidClientCredentialsError if no user by that token exists or the token + is invalid + """ + + async def is_server_admin(self, requester: Requester) -> bool: + """Check if the given user is a local server admin. 
+
+        Args:
+            requester: user to check
+
+        Returns:
+            True if the user is an admin
+        """
+
+    async def check_can_change_room_list(
+        self, room_id: str, requester: Requester
+    ) -> bool:
+        """Determine whether the user is allowed to edit the room's entry in the
+        published room list.
+
+        Args:
+            room_id: The room whose published-list entry is being changed.
+            requester: The user making the request, according to the access token.
+        """
+
+    @staticmethod
+    def has_access_token(request: Request) -> bool:
+        """Checks if the request has an access_token.
+
+        Returns:
+            False if no access_token was given, True otherwise.
+        """
+
+    @staticmethod
+    def get_access_token_from_request(request: Request) -> str:
+        """Extracts the access_token from the request.
+
+        Args:
+            request: The http request.
+        Returns:
+            The access_token
+        Raises:
+            MissingClientTokenError: If there isn't a single access_token in the
+                request
+        """
+
+    async def check_user_in_room_or_world_readable(
+        self, room_id: str, requester: Requester, allow_departed_users: bool = False
+    ) -> Tuple[str, Optional[str]]:
+        """Checks that the user is or was in the room, or that the room is world
+        readable. If neither holds, an exception is raised.
+
+        Args:
+            room_id: room to check
+            requester: The user making the request, according to the access token.
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed
+
+        Returns:
+            Resolves to the current membership of the user in the room and the
+            membership event ID of the user. If the user is not in the room and
+            never has been, then `(Membership.JOIN, None)` is returned.
+        """
diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py
new file mode 100644
index 0000000000..240f2b90de
--- /dev/null
+++ b/synapse/api/auth/base.py
@@ -0,0 +1,273 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional, Tuple
+
+from twisted.web.server import Request
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes, HistoryVisibility, Membership
+from synapse.api.errors import (
+    AuthError,
+    Codes,
+    MissingClientTokenError,
+    UnstableSpecAuthError,
+)
+from synapse.appservice import ApplicationService
+from synapse.logging.opentracing import trace
+from synapse.types import Requester
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class BaseAuth:
+    """Common base class for all auth implementations."""
+
+    def __init__(self, hs: "HomeServer"):
+        self.hs = hs
+        self.store = hs.get_datastores().main
+        self._storage_controllers = hs.get_storage_controllers()
+
+    async def check_user_in_room(
+        self,
+        room_id: str,
+        requester: Requester,
+        allow_departed_users: bool = False,
+    ) -> Tuple[str, Optional[str]]:
+        """Check if the user is in the room, or was at some point.
+
+        Args:
+            room_id: The room to check.
+
+            requester: The user making the request, according to the access token.
+
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed.
+
+        Raises:
+            AuthError if the user is/was not in the room.
+        Returns:
+            The current membership of the user in the room and the
+            membership event ID of the user.
+        """
+
+        user_id = requester.user.to_string()
+        (
+            membership,
+            member_event_id,
+        ) = await self.store.get_local_current_membership_for_user_in_room(
+            user_id=user_id,
+            room_id=room_id,
+        )
+
+        if membership:
+            if membership == Membership.JOIN:
+                return membership, member_event_id
+
+            # XXX this looks totally bogus. Why do we not allow users who have been banned,
+            # or those who were members previously and have been re-invited?
+            if allow_departed_users and membership == Membership.LEAVE:
+                forgot = await self.store.did_forget(user_id, room_id)
+                if not forgot:
+                    return membership, member_event_id
+        raise UnstableSpecAuthError(
+            403,
+            "User %s not in room %s" % (user_id, room_id),
+            errcode=Codes.NOT_JOINED,
+        )
+
+    @trace
+    async def check_user_in_room_or_world_readable(
+        self, room_id: str, requester: Requester, allow_departed_users: bool = False
+    ) -> Tuple[str, Optional[str]]:
+        """Checks that the user is or was in the room, or that the room is world
+        readable. If neither holds, an exception is raised.
+
+        Args:
+            room_id: room to check
+            requester: The user making the request, according to the access token.
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed
+
+        Returns:
+            Resolves to the current membership of the user in the room and the
+            membership event ID of the user. If the user is not in the room and
+            never has been, then `(Membership.JOIN, None)` is returned.
+        """
+
+        try:
+            # check_user_in_room will return the most recent membership
+            # event for the user if:
+            #  * The user is a non-guest user, and was ever in the room
+            #  * The user is a guest user, and has joined the room
+            # else it will throw.
+            return await self.check_user_in_room(
+                room_id, requester, allow_departed_users=allow_departed_users
+            )
+        except AuthError:
+            visibility = await self._storage_controllers.state.get_current_state_event(
+                room_id, EventTypes.RoomHistoryVisibility, ""
+            )
+            if (
+                visibility
+                and visibility.content.get("history_visibility")
+                == HistoryVisibility.WORLD_READABLE
+            ):
+                return Membership.JOIN, None
+            raise AuthError(
+                403,
+                "User %r not in room %s, and room previews are disabled"
+                % (requester.user, room_id),
+            )
+
+    async def validate_appservice_can_control_user_id(
+        self, app_service: ApplicationService, user_id: str
+    ) -> None:
+        """Validates that the app service is allowed to control
+        the given user.
+
+        Args:
+            app_service: The app service that controls the user
+            user_id: The author MXID that the app service is controlling
+
+        Raises:
+            AuthError: If the application service is not allowed to control the user
+                (user namespace regex does not match, wrong homeserver, etc)
+                or if the user has not been registered yet.
+        """
+
+        # It's ok if the app service is trying to use the sender from their registration
+        if app_service.sender == user_id:
+            pass
+        # Check to make sure the app service is allowed to control the user
+        elif not app_service.is_interested_in_user(user_id):
+            raise AuthError(
+                403,
+                "Application service cannot masquerade as this user (%s)." % user_id,
+            )
+        # Check to make sure the user is already registered on the homeserver
+        elif not (await self.store.get_user_by_id(user_id)):
+            raise AuthError(
+                403, "Application service has not registered this user (%s)" % user_id
+            )
+
+    async def is_server_admin(self, requester: Requester) -> bool:
+        """Check if the given user is a local server admin.
+
+        Args:
+            requester: user to check
+
+        Returns:
+            True if the user is an admin
+        """
+        raise NotImplementedError()
+
+    async def check_can_change_room_list(
+        self, room_id: str, requester: Requester
+    ) -> bool:
+        """Determine whether the user is allowed to edit the room's entry in the
+        published room list.
+
+        Args:
+            room_id: The room whose published-list entry is being changed.
+            requester: The user making the request, according to the access token.
+        """
+
+        is_admin = await self.is_server_admin(requester)
+        if is_admin:
+            return True
+
+        await self.check_user_in_room(room_id, requester)
+
+        # We currently require the user to be a "moderator" in the room. We do this
+        # by checking if they would (theoretically) be able to change the
+        # m.room.canonical_alias events
+
+        power_level_event = (
+            await self._storage_controllers.state.get_current_state_event(
+                room_id, EventTypes.PowerLevels, ""
+            )
+        )
+
+        auth_events = {}
+        if power_level_event:
+            auth_events[(EventTypes.PowerLevels, "")] = power_level_event
+
+        send_level = event_auth.get_send_level(
+            EventTypes.CanonicalAlias, "", power_level_event
+        )
+        user_level = event_auth.get_user_power_level(
+            requester.user.to_string(), auth_events
+        )
+
+        return user_level >= send_level
+
+    @staticmethod
+    def has_access_token(request: Request) -> bool:
+        """Checks if the request has an access_token.
+
+        Returns:
+            False if no access_token was given, True otherwise.
+        """
+        # This will always be set by the time Twisted calls us.
+        assert request.args is not None
+
+        query_params = request.args.get(b"access_token")
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        return bool(query_params) or bool(auth_headers)
+
+    @staticmethod
+    def get_access_token_from_request(request: Request) -> str:
+        """Extracts the access_token from the request.
+
+        Args:
+            request: The http request.
+        Returns:
+            The access_token
+        Raises:
+            MissingClientTokenError: If there isn't a single access_token in the
+                request
+        """
+        # This will always be set by the time Twisted calls us.
+        assert request.args is not None
+
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        query_params = request.args.get(b"access_token")
+        if auth_headers:
+            # Try to get the access_token from an "Authorization: Bearer"
+            # header
+            if query_params is not None:
+                raise MissingClientTokenError(
+                    "Mixing Authorization headers and access_token query parameters."
+                )
+            if len(auth_headers) > 1:
+                raise MissingClientTokenError("Too many Authorization headers.")
+            parts = auth_headers[0].split(b" ")
+            if parts[0] == b"Bearer" and len(parts) == 2:
+                return parts[1].decode("ascii")
+            else:
+                raise MissingClientTokenError("Invalid Authorization header.")
+        else:
+            # Try to get the access_token from the query params.
+            if not query_params:
+                raise MissingClientTokenError()
+
+            return query_params[0].decode("ascii")
diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py
new file mode 100644
index 0000000000..813d537e53
--- /dev/null
+++ b/synapse/api/auth/internal.py
@@ -0,0 +1,369 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import TYPE_CHECKING, Optional + +import pymacaroons +from netaddr import IPAddress + +from twisted.web.server import Request + +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientTokenError, + MissingClientTokenError, +) +from synapse.http import get_request_user_agent +from synapse.http.site import SynapseRequest +from synapse.logging.opentracing import active_span, force_tracing, start_active_span +from synapse.types import Requester, create_requester +from synapse.util.cancellation import cancellable + +from . import GUEST_DEVICE_ID +from .base import BaseAuth + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class InternalAuth(BaseAuth): + """ + This class contains functions for authenticating users of our client-server API. + """ + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + self.clock = hs.get_clock() + self._account_validity_handler = hs.get_account_validity_handler() + self._macaroon_generator = hs.get_macaroon_generator() + + self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips + self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips + self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users + + @cancellable + async def get_user_by_req( + self, + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + ) -> Requester: + """Get a registered user's ID. + + Args: + request: An HTTP request with an access_token query parameter. + allow_guest: If False, will raise an AuthError if the user making the + request is a guest. + allow_expired: If True, allow the request through even if the account + is expired, or session token lifetime has ended. Note that + /login will deliver access tokens regardless of expiration. + + Returns: + Resolves to the requester + Raises: + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. + AuthError if access is denied for the user in the access token + """ + parent_span = active_span() + with start_active_span("get_user_by_req"): + requester = await self._wrapped_get_user_by_req( + request, allow_guest, allow_expired + ) + + if parent_span: + if requester.authenticated_entity in self._force_tracing_for_users: + # request tracing is enabled for this user, so we need to force it + # tracing on for the parent span (which will be the servlet span). + # + # It's too late for the get_user_by_req span to inherit the setting, + # so we also force it on for that. 
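+                    #
+                    # (A note on the two calls below, as a sketch of the assumed
+                    # semantics: force_tracing() with no argument is taken to act
+                    # on the currently-active span, i.e. the get_user_by_req span
+                    # opened above, while force_tracing(parent_span) targets the
+                    # servlet span explicitly.)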
+                    force_tracing()
+                    force_tracing(parent_span)
+                parent_span.set_tag(
+                    "authenticated_entity", requester.authenticated_entity
+                )
+                parent_span.set_tag("user_id", requester.user.to_string())
+                if requester.device_id is not None:
+                    parent_span.set_tag("device_id", requester.device_id)
+                if requester.app_service is not None:
+                    parent_span.set_tag("appservice_id", requester.app_service.id)
+            return requester
+
+    @cancellable
+    async def _wrapped_get_user_by_req(
+        self,
+        request: SynapseRequest,
+        allow_guest: bool,
+        allow_expired: bool,
+    ) -> Requester:
+        """Helper for get_user_by_req
+
+        Once get_user_by_req has set up the opentracing span, this does the actual work.
+        """
+        try:
+            ip_addr = request.getClientAddress().host
+            user_agent = get_request_user_agent(request)
+
+            access_token = self.get_access_token_from_request(request)
+
+            # First check if it could be a request from an appservice
+            requester = await self._get_appservice_user(request)
+            if not requester:
+                # If not, it should be from a regular user
+                requester = await self.get_user_by_access_token(
+                    access_token, allow_expired=allow_expired
+                )
+
+                # Deny the request if the user account has expired.
+                # This check is only done for regular users, not appservice ones.
+                if not allow_expired:
+                    if await self._account_validity_handler.is_user_expired(
+                        requester.user.to_string()
+                    ):
+                        # Raise the error if either an account validity module has determined
+                        # the account has expired, or the legacy account validity
+                        # implementation is enabled and determined the account has expired
+                        raise AuthError(
+                            403,
+                            "User account has expired",
+                            errcode=Codes.EXPIRED_ACCOUNT,
+                        )
+
+            if ip_addr and (
+                not requester.app_service or self._track_appservice_user_ips
+            ):
+                # XXX(quenting): I'm 95% confident that we could skip setting the
+                # device_id to "dummy-device" for appservices, and that the only impact
+                # would be some rows which would not deduplicate in the 'user_ips'
+                # table during the transition
+                recorded_device_id = (
+                    "dummy-device"
+                    if requester.device_id is None and requester.app_service is not None
+                    else requester.device_id
+                )
+                await self.store.insert_client_ip(
+                    user_id=requester.authenticated_entity,
+                    access_token=access_token,
+                    ip=ip_addr,
+                    user_agent=user_agent,
+                    device_id=recorded_device_id,
+                )
+
+                # Also track the puppeted user's client IP if enabled and the user is puppeting
+                if (
+                    requester.user.to_string() != requester.authenticated_entity
+                    and self._track_puppeted_user_ips
+                ):
+                    await self.store.insert_client_ip(
+                        user_id=requester.user.to_string(),
+                        access_token=access_token,
+                        ip=ip_addr,
+                        user_agent=user_agent,
+                        device_id=requester.device_id,
+                    )
+
+            if requester.is_guest and not allow_guest:
+                raise AuthError(
+                    403,
+                    "Guest access not allowed",
+                    errcode=Codes.GUEST_ACCESS_FORBIDDEN,
+                )
+
+            request.requester = requester
+            return requester
+        except KeyError:
+            raise MissingClientTokenError()
+
+    @cancellable
+    async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
+        """
+        Given a request, reads the request parameters to determine:
+        - whether it's an application service that's making this request
+        - what user the application service should be treated as controlling
+          (the user_id URI parameter allows an application service to masquerade
+          as any applicable user in its namespace)
+        - what device the application service should be treated as controlling
+          (the device_id[^1] URI parameter allows an application service to masquerade
+          as any device that exists for the
relevant user) + + [^1] Unstable and provided by MSC3202. + Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. + + Returns: + the application service `Requester` of that request + + Postconditions: + - The `app_service` field in the returned `Requester` is set + - The `user_id` field in the returned `Requester` is either the application + service sender or the controlled user set by the `user_id` URI parameter + - The returned application service is permitted to control the returned user ID. + - The returned device ID, if present, has been checked to be a valid device ID + for the returned user ID. + """ + DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" + + app_service = self.store.get_app_service_by_token( + self.get_access_token_from_request(request) + ) + if app_service is None: + return None + + if app_service.ip_range_whitelist: + ip_address = IPAddress(request.getClientAddress().host) + if ip_address not in app_service.ip_range_whitelist: + return None + + # This will always be set by the time Twisted calls us. + assert request.args is not None + + if b"user_id" in request.args: + effective_user_id = request.args[b"user_id"][0].decode("utf8") + await self.validate_appservice_can_control_user_id( + app_service, effective_user_id + ) + else: + effective_user_id = app_service.sender + + effective_device_id: Optional[str] = None + + if ( + self.hs.config.experimental.msc3202_device_masquerading_enabled + and DEVICE_ID_ARG_NAME in request.args + ): + effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") + # We only just set this so it can't be None! + assert effective_device_id is not None + device_opt = await self.store.get_device( + effective_user_id, effective_device_id + ) + if device_opt is None: + # For now, use 400 M_EXCLUSIVE if the device doesn't exist. + # This is an open thread of discussion on MSC3202 as of 2021-12-09. + raise AuthError( + 400, + f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", + Codes.EXCLUSIVE, + ) + + return create_requester( + effective_user_id, app_service=app_service, device_id=effective_device_id + ) + + async def get_user_by_access_token( + self, + token: str, + allow_expired: bool = False, + ) -> Requester: + """Validate access token and get user_id from it + + Args: + token: The access token to get the user by + allow_expired: If False, raises an InvalidClientTokenError + if the token is expired + + Raises: + InvalidClientTokenError if a user by that token exists, but the token is + expired + InvalidClientCredentialsError if no user by that token exists or the token + is invalid + """ + + # First look in the database to see if the access token is present + # as an opaque token. + user_info = await self.store.get_user_by_access_token(token) + if user_info: + valid_until_ms = user_info.valid_until_ms + if ( + not allow_expired + and valid_until_ms is not None + and valid_until_ms < self.clock.time_msec() + ): + # there was a valid access token, but it has expired. + # soft-logout the user. + raise InvalidClientTokenError( + msg="Access token has expired", soft_logout=True + ) + + # Mark the token as used. This is used to invalidate old refresh + # tokens after some time. 
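+            # (Presumably the point is refresh-token rotation: if a freshly
+            # issued access token was never used, the client may never have
+            # received the refresh response, so the previous refresh token can
+            # safely be replayed; recording first use here closes that window.)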
+ await self.store.mark_access_token_as_used(user_info.token_id) + + requester = create_requester( + user_id=user_info.user_id, + access_token_id=user_info.token_id, + is_guest=user_info.is_guest, + shadow_banned=user_info.shadow_banned, + device_id=user_info.device_id, + authenticated_entity=user_info.token_owner, + ) + + return requester + + # If the token isn't found in the database, then it could still be a + # macaroon for a guest, so we check that here. + try: + user_id = self._macaroon_generator.verify_guest_token(token) + + # Guest access tokens are not stored in the database (there can + # only be one access token per guest, anyway). + # + # In order to prevent guest access tokens being used as regular + # user access tokens (and hence getting around the invalidation + # process), we look up the user id and check that it is indeed + # a guest user. + # + # It would of course be much easier to store guest access + # tokens in the database as well, but that would break existing + # guest tokens. + stored_user = await self.store.get_user_by_id(user_id) + if not stored_user: + raise InvalidClientTokenError("Unknown user_id %s" % user_id) + if not stored_user["is_guest"]: + raise InvalidClientTokenError( + "Guest access token used for regular user" + ) + + return create_requester( + user_id=user_id, + is_guest=True, + # all guests get the same device id + device_id=GUEST_DEVICE_ID, + authenticated_entity=user_id, + ) + except ( + pymacaroons.exceptions.MacaroonException, + TypeError, + ValueError, + ) as e: + logger.warning( + "Invalid access token in auth: %s %s.", + type(e), + e, + ) + raise InvalidClientTokenError("Invalid access token passed.") + + async def is_server_admin(self, requester: Requester) -> bool: + """Check if the given user is a local server admin. + + Args: + requester: The user making the request, according to the access token. + + Returns: + True if the user is an admin + """ + return await self.store.is_server_admin(requester.user) diff --git a/synapse/server.py b/synapse/server.py index cce5fb66ff..df88af12a9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -31,6 +31,7 @@ from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter @@ -427,7 +428,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: - return Auth(self) + return InternalAuth(self) @cache_in_self def get_auth_blocking(self) -> AuthBlocking: diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 6e36e73f0d..3dac52d178 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -18,7 +18,7 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import UserTypes from synapse.api.errors import ( @@ -48,7 +48,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # have been called by the HomeserverTestCase machinery. hs.datastores.main = self.store # type: ignore[union-attr] hs.get_auth_handler().store = self.store - self.auth = Auth(hs) + self.auth = InternalAuth(hs) # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 73822b07a5..8d8584609b 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.constants import UserTypes from synapse.api.errors import ( CodeMessageException, @@ -683,7 +683,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): request = Mock(args={}) request.args[b"access_token"] = [token.encode("ascii")] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - auth = Auth(self.hs) + auth = InternalAuth(self.hs) requester = self.get_success(auth.get_user_by_req(request)) self.assertTrue(requester.shadow_banned) diff --git a/tests/test_state.py b/tests/test_state.py index ddf59916b1..7a49b87953 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -28,7 +28,7 @@ from unittest.mock import Mock from twisted.internet import defer -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict @@ -240,7 +240,7 @@ class StateTestCase(unittest.TestCase): hs.get_macaroon_generator.return_value = MacaroonGenerator( clock, "tesths", b"verysecret" ) - hs.get_auth.return_value = Auth(hs) + hs.get_auth.return_value = InternalAuth(hs) hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs) hs.get_storage_controllers.return_value = storage_controllers -- cgit 1.5.1 From 765244faeef9e20c573d2c7935f05f76aeca1c28 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 13 Sep 2022 17:54:32 +0200 Subject: Initial MSC3964 support: delegation of auth to OIDC server --- synapse/api/auth/oauth_delegated.py | 227 ++++++++++++++++++++++++++++++++++++ synapse/config/auth.py | 30 ++++- synapse/server.py | 4 + 3 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 synapse/api/auth/oauth_delegated.py diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py new file mode 100644 index 0000000000..b3b5c29a94 --- /dev/null +++ b/synapse/api/auth/oauth_delegated.py @@ -0,0 +1,227 @@ +# Copyright 2023 The Matrix.org Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
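+#
+# A rough sketch of the exchange this module performs (RFC 7662 token
+# introspection; all values below are illustrative, not taken from a real
+# deployment):
+#
+#     POST /introspect HTTP/1.1
+#     Content-Type: application/x-www-form-urlencoded
+#
+#     token=<access_token>&token_type_hint=access_token&client_id=...&client_secret=...
+#
+#     200 OK
+#     {"active": true, "sub": "abc-def-ghi", "username": "alice",
+#      "scope": "urn:matrix:org.matrix.msc2967.client:api:*"}
+#
+# A response of {"active": false} means the token must be rejected.
+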
+import logging +from typing import TYPE_CHECKING, Any, Dict, List, Optional +from urllib.parse import urlencode + +from authlib.oauth2 import ClientAuth +from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post +from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign +from authlib.oauth2.rfc7662 import IntrospectionToken +from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url + +from twisted.web.client import readBody +from twisted.web.http_headers import Headers + +from synapse.api.auth.base import BaseAuth +from synapse.api.errors import AuthError, StoreError +from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable +from synapse.types import Requester, UserID, create_requester +from synapse.util import json_decoder +from synapse.util.caches.cached_call import RetryOnExceptionCachedCall + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +def scope_to_list(scope: str) -> List[str]: + """Convert a scope string to a list of scope tokens""" + return scope.strip().split(" ") + + +class PrivateKeyJWTWithKid(PrivateKeyJWT): + """An implementation of the private_key_jwt client auth method that includes a kid header. + + This is needed because some providers (Keycloak) require the kid header to figure + out which key to use to verify the signature. + """ + + def sign(self, auth: Any, token_endpoint: str) -> bytes: + return private_key_jwt_sign( + auth.client_secret, + client_id=auth.client_id, + token_endpoint=token_endpoint, + claims=self.claims, + header={"kid": auth.client_secret["kid"]}, + ) + + +class OAuthDelegatedAuth(BaseAuth): + AUTH_METHODS = { + "client_secret_post": encode_client_secret_post, + "client_secret_basic": encode_client_secret_basic, + "client_secret_jwt": ClientSecretJWT(), + "private_key_jwt": PrivateKeyJWTWithKid(), + } + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self._config = hs.config.auth + assert self._config.oauth_delegation_enabled, "OAuth delegation is not enabled" + assert self._config.oauth_delegation_issuer, "No issuer provided" + assert self._config.oauth_delegation_client_id, "No client_id provided" + assert self._config.oauth_delegation_client_secret, "No client_secret provided" + assert ( + self._config.oauth_delegation_client_auth_method + in OAuthDelegatedAuth.AUTH_METHODS + ), "Invalid client_auth_method" + + self._http_client = hs.get_proxied_http_client() + self._hostname = hs.hostname + + self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + secret = self._config.oauth_delegation_client_secret + self._client_auth = ClientAuth( + self._config.oauth_delegation_client_id, + secret, + OAuthDelegatedAuth.AUTH_METHODS[ + self._config.oauth_delegation_client_auth_method + ], + ) + + async def _load_metadata(self) -> OpenIDProviderMetadata: + if self._config.oauth_delegation_issuer_metadata is not None: + return OpenIDProviderMetadata( + **self._config.oauth_delegation_issuer_metadata + ) + url = get_well_known_url(self._config.oauth_delegation_issuer, external=True) + response = await self._http_client.get_json(url) + metadata = OpenIDProviderMetadata(**response) + # metadata.validate_introspection_endpoint() + return metadata + + async def _introspect_token(self, token: str) -> IntrospectionToken: + metadata = await self._issuer_metadata.get() + introspection_endpoint = metadata.get("introspection_endpoint") + raw_headers: 
Dict[str, str] = {
+            "Content-Type": "application/x-www-form-urlencoded",
+            "User-Agent": str(self._http_client.user_agent, "utf-8"),
+            "Accept": "application/json",
+        }
+
+        args = {"token": token, "token_type_hint": "access_token"}
+        body = urlencode(args, True)
+
+        # Fill the body/headers with credentials
+        uri, raw_headers, body = self._client_auth.prepare(
+            method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
+        )
+        headers = Headers({k: [v] for (k, v) in raw_headers.items()})
+
+        # Do the actual request
+        # We're not using the SimpleHttpClient util methods as we don't want to
+        # check the HTTP status code, and we do the body encoding ourselves.
+        response = await self._http_client.request(
+            method="POST",
+            uri=uri,
+            data=body.encode("utf-8"),
+            headers=headers,
+        )
+
+        resp_body = await make_deferred_yieldable(readBody(response))
+        # TODO: Let's not worry about 5xx errors & co. for now and just try
+        # decoding that as JSON. We should also do some validation of the
+        # response
+        resp = json_decoder.decode(resp_body.decode("utf-8"))
+        return IntrospectionToken(**resp)
+
+    async def get_user_by_req(
+        self,
+        request: SynapseRequest,
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+    ) -> Requester:
+        access_token = self.get_access_token_from_request(request)
+        return await self.get_user_by_access_token(access_token, allow_expired)
+
+    async def get_user_by_access_token(
+        self,
+        token: str,
+        allow_expired: bool = False,
+    ) -> Requester:
+        introspection_result = await self._introspect_token(token)
+
+        logger.info(f"Introspection result: {introspection_result!r}")
+
+        # TODO: introspection verification should be more extensive, especially:
+        #   - verify the scopes
+        #   - verify the audience
+        if not introspection_result.get("active"):
+            raise AuthError(
+                403,
+                "Invalid access token",
+            )
+
+        # TODO: claim mapping should be configurable
+        username: Optional[str] = introspection_result.get("username")
+        if username is None or not isinstance(username, str):
+            raise AuthError(
+                500,
+                "Invalid username claim in the introspection result",
+            )
+
+        # Let's look at the scope
+        scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
+        device_id = None
+        # Find device_id in scope
+        for tok in scope:
+            if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"):
+                parts = tok.split(":")
+                if len(parts) == 5:
+                    device_id = parts[4]
+
+        user_id = UserID(username, self._hostname)
+        user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+
+        # If the user does not exist, we should create it on the fly
+        # TODO: we could use SCIM to provision users ahead of time and listen
+        # for SCIM SET events if those ever become standard:
+        # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+        if not user_info:
+            await self.store.register_user(user_id=user_id.to_string())
+            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+            if not user_info:
+                raise AuthError(
+                    500,
+                    "Could not create user on the fly",
+                )
+
+        if device_id:
+            # Create the device on the fly if it does not exist
+            try:
+                await self.store.get_device(
+                    user_id=user_id.to_string(), device_id=device_id
+                )
+            except StoreError:
+                await self.store.store_device(
+                    user_id=user_id.to_string(),
+                    device_id=device_id,
+                    initial_device_display_name="OIDC-native client",
+                )
+
+        # TODO: there are a few things missing in the requester here, which still need
+        # to be figured out, like:
+        # - impersonation, with the `authenticated_entity`,
which is used for + # rate-limiting, MAU limits, etc. + # - shadow-banning, with the `shadow_banned` flag + # - a proper solution for appservices, which still needs to be figured out in + # the context of MSC3861 + return create_requester( + user_id=user_id, + device_id=device_id, + ) diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 35774962c0..25b5cc60dc 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -14,9 +14,11 @@ # limitations under the License. from typing import Any +from authlib.jose.rfc7517 import JsonWebKey + from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError class AuthConfig(Config): @@ -53,3 +55,29 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) + + oauth_delegation = config.get("oauth_delegation", {}) + self.oauth_delegation_enabled = oauth_delegation.get("enabled", False) + self.oauth_delegation_issuer = oauth_delegation.get("issuer", "") + self.oauth_delegation_issuer_metadata = oauth_delegation.get("issuer_metadata") + self.oauth_delegation_account = oauth_delegation.get("account", "") + self.oauth_delegation_client_id = oauth_delegation.get("client_id", "") + self.oauth_delegation_client_secret = oauth_delegation.get("client_secret", "") + self.oauth_delegation_client_auth_method = oauth_delegation.get( + "client_auth_method", "client_secret_post" + ) + + self.password_enabled = password_config.get( + "enabled", not self.oauth_delegation_enabled + ) + + if self.oauth_delegation_client_auth_method == "private_key_jwt": + self.oauth_delegation_client_secret = JsonWebKey.import_key( + self.oauth_delegation_client_secret + ) + + # If we are delegating via OAuth then password cannot be supported as well + if self.oauth_delegation_enabled and self.password_enabled: + raise ConfigError( + "Password auth cannot be enabled when OAuth delegation is enabled" + ) diff --git a/synapse/server.py b/synapse/server.py index df88af12a9..1c82500f30 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -428,6 +428,10 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: + if self.config.auth.oauth_delegation_enabled: + from synapse.api.auth.oauth_delegated import OAuthDelegatedAuth + + return OAuthDelegatedAuth(self) return InternalAuth(self) @cache_in_self -- cgit 1.5.1 From 8f576aa462684e13b20dc380e759a76e6db821b6 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 16 May 2023 15:36:40 +0200 Subject: Expose the public keys used for client authentication on an endpoint --- synapse/rest/synapse/client/__init__.py | 6 +++ synapse/rest/synapse/client/jwks.py | 72 +++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 synapse/rest/synapse/client/jwks.py diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index e55924f597..dcfd0ad6aa 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -46,6 +46,12 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc "/_synapse/client/unsubscribe": UnsubscribeResource(hs), } + # Expose the JWKS endpoint if OAuth2 delegation is enabled + if hs.config.auth.oauth_delegation_enabled: + from synapse.rest.synapse.client.jwks import JwksResource + + resources["/_synapse/jwks"] = JwksResource(hs) + # provider-specific SSO bits. Only load these if they are enabled, since they # rely on optional dependencies. 
if hs.config.oidc.oidc_enabled: diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py new file mode 100644 index 0000000000..818585843e --- /dev/null +++ b/synapse/rest/synapse/client/jwks.py @@ -0,0 +1,72 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import TYPE_CHECKING, Tuple + +from synapse.http.server import DirectServeJsonResource +from synapse.http.site import SynapseRequest +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class JwksResource(DirectServeJsonResource): + def __init__(self, hs: "HomeServer"): + from authlib.jose.rfc7517 import Key + + super().__init__(extract_context=True) + + # Parameters that are allowed to be exposed in the public key. + # This is done manually, because authlib's private to public key conversion + # is unreliable depending on the version. Instead, we just serialize the private + # key and only keep the public parameters. + # List from https://www.iana.org/assignments/jose/jose.xhtml#web-key-parameters + public_parameters = { + "kty", + "use", + "key_ops", + "alg", + "kid", + "x5u", + "x5c", + "x5t", + "x5t#S256", + "crv", + "x", + "y", + "n", + "e", + "ext", + } + + secret = hs.config.auth.oauth_delegation_client_secret + + if isinstance(secret, Key): + private_key = secret.as_dict() + public_key = { + k: v for k, v in private_key.items() if k in public_parameters + } + keys = [public_key] + else: + keys = [] + + self.res = { + "keys": keys, + } + + async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + return 200, self.res -- cgit 1.5.1 From e82ec6d00819253d15d22a41ba3b75ad77dce98f Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 18 Nov 2021 15:21:00 +0100 Subject: MSC2965: OIDC Provider discovery via well-known document --- synapse/rest/well_known.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index e2174fdfea..fd3b17a5ad 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -44,6 +44,15 @@ class WellKnownBuilder: "base_url": self._config.registration.default_identity_server } + if self._config.auth.oauth_delegation_enabled: + result["org.matrix.msc2965.authentication"] = { + "issuer": self._config.auth.oauth_delegation_issuer + } + if self._config.auth.oauth_delegation_account != "": + result["org.matrix.msc2965.authentication"][ + "account" + ] = self._config.auth.oauth_delegation_account + if self._config.server.extra_well_known_client_content: for ( key, -- cgit 1.5.1 From c5cf1b421d8e0d765f812880ff41fe5d244a0919 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 17 Jun 2022 16:58:05 +0200 Subject: Save the scopes in the requester --- synapse/api/auth/oauth_delegated.py | 1 + synapse/types/__init__.py | 8 ++++++++ tests/api/test_auth.py | 2 ++ 3 files changed, 11 insertions(+) diff --git 
a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index b3b5c29a94..2715127e32 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -224,4 +224,5 @@ class OAuthDelegatedAuth(BaseAuth): return create_requester( user_id=user_id, device_id=device_id, + scope=scope, ) diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 42baf8ac6b..dfc95e8ebb 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -131,6 +131,7 @@ class Requester: user: "UserID" access_token_id: Optional[int] is_guest: bool + scope: Set[str] shadow_banned: bool device_id: Optional[str] app_service: Optional["ApplicationService"] @@ -147,6 +148,7 @@ class Requester: "user_id": self.user.to_string(), "access_token_id": self.access_token_id, "is_guest": self.is_guest, + "scope": list(self.scope), "shadow_banned": self.shadow_banned, "device_id": self.device_id, "app_server_id": self.app_service.id if self.app_service else None, @@ -175,6 +177,7 @@ class Requester: user=UserID.from_string(input["user_id"]), access_token_id=input["access_token_id"], is_guest=input["is_guest"], + scope=set(input["scope"]), shadow_banned=input["shadow_banned"], device_id=input["device_id"], app_service=appservice, @@ -186,6 +189,7 @@ def create_requester( user_id: Union[str, "UserID"], access_token_id: Optional[int] = None, is_guest: bool = False, + scope: StrCollection = (), shadow_banned: bool = False, device_id: Optional[str] = None, app_service: Optional["ApplicationService"] = None, @@ -199,6 +203,7 @@ def create_requester( access_token_id: *ID* of the access token used for this request, or None if it came via the appservice API or similar is_guest: True if the user making this request is a guest user + scope: the scope of the access token used for this request, if any shadow_banned: True if the user making this request is shadow-banned. 
device_id: device_id which was set at authentication time app_service: the AS requesting on behalf of the user @@ -215,10 +220,13 @@ def create_requester( if authenticated_entity is None: authenticated_entity = user_id.to_string() + scope = set(scope) + return Requester( user_id, access_token_id, is_guest, + scope, shadow_banned, device_id, app_service, diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 3dac52d178..cdb0048122 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -426,6 +426,7 @@ class AuthTestCase(unittest.HomeserverTestCase): access_token_id=None, device_id="FOOBAR", is_guest=False, + scope=set(), shadow_banned=False, app_service=appservice, authenticated_entity="@appservice:server", @@ -456,6 +457,7 @@ class AuthTestCase(unittest.HomeserverTestCase): access_token_id=None, device_id="FOOBAR", is_guest=False, + scope=set(), shadow_banned=False, app_service=appservice, authenticated_entity="@appservice:server", -- cgit 1.5.1 From 7628dbf4e9b48d9714ccbd0530af579d9c290fed Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 20 Jun 2022 11:17:48 +0200 Subject: Handle the Synapse admin scope --- synapse/api/auth/oauth_delegated.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 2715127e32..ff1f395e58 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -140,6 +140,9 @@ class OAuthDelegatedAuth(BaseAuth): resp = json_decoder.decode(resp_body.decode("utf-8")) return IntrospectionToken(**resp) + async def is_server_admin(self, requester: Requester) -> bool: + return "urn:synapse:admin:*" in requester.scope + async def get_user_by_req( self, request: SynapseRequest, -- cgit 1.5.1 From f9cd549f6485620381443f2b4b75a1bd0a88d39f Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 13 Sep 2022 16:13:20 +0200 Subject: Record the `sub` claims as an external_id --- synapse/api/auth/oauth_delegated.py | 59 +++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index ff1f395e58..5565ef0a1a 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -68,6 +68,8 @@ class OAuthDelegatedAuth(BaseAuth): "private_key_jwt": PrivateKeyJWTWithKid(), } + EXTERNAL_ID_PROVIDER = "oauth-delegated" + def __init__(self, hs: "HomeServer"): super().__init__(hs) @@ -170,13 +172,42 @@ class OAuthDelegatedAuth(BaseAuth): "Invalid access token", ) - # TODO: claim mapping should be configurable - username: Optional[str] = introspection_result.get("username") - if username is None or not isinstance(username, str): - raise AuthError( - 500, - "Invalid username claim in the introspection result", + # Match via the sub claim + sub: Optional[str] = introspection_result.get("sub") + if sub is None: + raise AuthError(500, "Invalid sub claim in the introspection result") + + user_id_str = await self.store.get_user_by_external_id( + OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub + ) + if user_id_str is None: + # If we could not find a user via the external_id, it either does not exist, + # or the external_id was never recorded + + # TODO: claim mapping should be configurable + username: Optional[str] = introspection_result.get("username") + if username is None or not isinstance(username, str): + raise AuthError( + 500, + "Invalid username claim in the introspection result", + ) + user_id = UserID(username, 
self._hostname)
+
+            # First try to find a user from the username claim
+            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+            if user_info is None:
+                # If the user does not exist, we should create it on the fly
+                # TODO: we could use SCIM to provision users ahead of time and listen
+                # for SCIM SET events if those ever become standard:
+                # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+                await self.store.register_user(user_id=user_id.to_string())
+
+            # And record the sub as external_id
+            await self.store.record_user_external_id(
+                OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string()
             )
+        else:
+            user_id = UserID.from_string(user_id_str)
 
         # Let's look at the scope
         scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
@@ -188,22 +219,6 @@ class OAuthDelegatedAuth(BaseAuth):
             if len(parts) == 5:
                 device_id = parts[4]
 
-        user_id = UserID(username, self._hostname)
-        user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
-
-        # If the user does not exist, we should create it on the fly
-        # TODO: we could use SCIM to provision users ahead of time and listen
-        # for SCIM SET events if those ever become standard:
-        # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
-        if not user_info:
-            await self.store.register_user(user_id=user_id.to_string())
-            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
-            if not user_info:
-                raise AuthError(
-                    500,
-                    "Could not create user on the fly",
-                )
-
         if device_id:
             # Create the device on the fly if it does not exist
             try:
-- cgit 1.5.1


From d20669971a5be17776a2991c77f5348662bb3902 Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Tue, 20 Sep 2022 12:54:18 +0100
Subject: Use `name` claim as display name when registering users on the fly.

This makes it so that the `name` claim obtained when introspecting the token
is used as the display name when registering a user on the fly.
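
A minimal sketch of the resulting mapping, assuming an introspection payload
shaped like the ones used in the tests later in this series (the values and
the `store` name are illustrative):

    async def register_from_introspection(store) -> None:
        # Hypothetical RFC 7662 introspection result:
        introspection_result = {
            "active": True,
            "sub": "abc-def-ghi",
            "username": "alice",
            "name": "Alice Example",  # standard OIDC claim picked up below
            "scope": "urn:matrix:org.matrix.msc2967.client:api:*",
        }

        # On-the-fly registration now carries the display name:
        name = introspection_result.get("name")
        await store.register_user(
            user_id="@alice:example.com",
            create_profile_with_displayname=name,
        )
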
--- synapse/api/auth/oauth_delegated.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 5565ef0a1a..9e01e3fadc 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -200,7 +200,14 @@ class OAuthDelegatedAuth(BaseAuth): # TODO: we could use SCIM to provision users ahead of time and listen # for SCIM SET events if those ever become standard: # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 - await self.store.register_user(user_id=user_id.to_string()) + + # TODO: claim mapping should be configurable + # If present, use the name claim as the displayname + name: Optional[str] = introspection_result.get("name") + + await self.store.register_user( + user_id=user_id.to_string(), create_profile_with_displayname=name + ) # And record the sub as external_id await self.store.record_user_external_id( -- cgit 1.5.1 From a1374b5c70fc8520930a1777dc131403812d7967 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Wed, 16 Nov 2022 11:05:05 +0000 Subject: MSC2967: Check access token scope for use as user and add guest support --- synapse/api/auth/oauth_delegated.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 9e01e3fadc..cfa178218c 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -25,7 +25,7 @@ from twisted.web.client import readBody from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth -from synapse.api.errors import AuthError, StoreError +from synapse.api.errors import AuthError, InvalidClientTokenError, StoreError from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable from synapse.types import Requester, UserID, create_requester @@ -164,18 +164,29 @@ class OAuthDelegatedAuth(BaseAuth): logger.info(f"Introspection result: {introspection_result!r}") # TODO: introspection verification should be more extensive, especially: - # - verify the scopes # - verify the audience if not introspection_result.get("active"): - raise AuthError( - 403, - "Invalid access token", - ) + raise InvalidClientTokenError("Token is not active") + + # Let's look at the scope + scope: List[str] = scope_to_list(introspection_result.get("scope", "")) + + # Determine type of user based on presence of particular scopes + has_admin_scope = "urn:synapse:admin:*" in scope + has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope + has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope + is_user = has_user_scope or has_admin_scope + is_guest = has_guest_scope and not is_user + + if not is_user and not is_guest: + raise InvalidClientTokenError("No scope in token granting user rights") # Match via the sub claim sub: Optional[str] = introspection_result.get("sub") if sub is None: - raise AuthError(500, "Invalid sub claim in the introspection result") + raise InvalidClientTokenError( + "Invalid sub claim in the introspection result" + ) user_id_str = await self.store.get_user_by_external_id( OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub @@ -216,10 +227,8 @@ class OAuthDelegatedAuth(BaseAuth): else: user_id = UserID.from_string(user_id_str) - # Let's look at the scope - scope: List[str] = scope_to_list(introspection_result.get("scope", "")) - device_id = None # Find device_id in scope + 
device_id = None for tok in scope: if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): parts = tok.split(":") @@ -250,4 +259,5 @@ class OAuthDelegatedAuth(BaseAuth): user_id=user_id, device_id=device_id, scope=scope, + is_guest=is_guest, ) -- cgit 1.5.1 From 28a9663bdf092541250ae1209f201e57b663dc81 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Wed, 16 Nov 2022 17:44:13 +0000 Subject: Initial tests for OAuth delegation --- tests/handlers/test_oauth_delegation.py | 345 ++++++++++++++++++++++++++++++++ 1 file changed, 345 insertions(+) create mode 100644 tests/handlers/test_oauth_delegation.py diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py new file mode 100644 index 0000000000..54f4894819 --- /dev/null +++ b/tests/handlers/test_oauth_delegation.py @@ -0,0 +1,345 @@ +# Copyright 2022 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict +from unittest.mock import ANY, Mock +from urllib.parse import parse_qs + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import InvalidClientTokenError +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock +from tests.unittest import HomeserverTestCase, skip_unless +from tests.utils import mock_getRawHeaders + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +# These are a few constants that are used as config parameters in the tests. 
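+# For orientation, the MSC2967-style scopes exercised below compose like this
+# (illustrative): a token for a regular user on device AABBCCDD carries
+#
+#     urn:matrix:org.matrix.msc2967.client:api:*
+#     urn:matrix:org.matrix.msc2967.client:device:AABBCCDD
+#
+# while an admin token additionally carries urn:synapse:admin:*.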
+SERVER_NAME = "test"
+ISSUER = "https://issuer/"
+CLIENT_ID = "test-client-id"
+CLIENT_SECRET = "test-client-secret"
+BASE_URL = "https://synapse/"
+SCOPES = ["openid"]
+
+AUTHORIZATION_ENDPOINT = ISSUER + "authorize"
+TOKEN_ENDPOINT = ISSUER + "token"
+USERINFO_ENDPOINT = ISSUER + "userinfo"
+WELL_KNOWN = ISSUER + ".well-known/openid-configuration"
+JWKS_URI = ISSUER + ".well-known/jwks.json"
+INTROSPECTION_ENDPOINT = ISSUER + "introspect"
+
+SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*"
+MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*"
+MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest"
+DEVICE = "AABBCCDD"
+MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE
+SUBJECT = "abc-def-ghi"
+USERNAME = "test-user"
+
+
+async def get_json(url: str) -> JsonDict:
+    # Mock get_json calls to handle jwks & oidc discovery endpoints
+    if url == WELL_KNOWN:
+        # Minimal discovery document, as defined in OpenID.Discovery
+        # https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+        return {
+            "issuer": ISSUER,
+            "authorization_endpoint": AUTHORIZATION_ENDPOINT,
+            "token_endpoint": TOKEN_ENDPOINT,
+            "jwks_uri": JWKS_URI,
+            "userinfo_endpoint": USERINFO_ENDPOINT,
+            "introspection_endpoint": INTROSPECTION_ENDPOINT,
+            "response_types_supported": ["code"],
+            "subject_types_supported": ["public"],
+            "id_token_signing_alg_values_supported": ["RS256"],
+        }
+    elif url == JWKS_URI:
+        return {"keys": []}
+
+    return {}
+
+
+@skip_unless(HAS_AUTHLIB, "requires authlib")
+class MSC3861OAuthDelegation(HomeserverTestCase):
+    def default_config(self) -> Dict[str, Any]:
+        config = super().default_config()
+        config["public_baseurl"] = BASE_URL
+        config["oauth_delegation"] = {
+            "enabled": True,
+            "issuer": ISSUER,
+            "client_id": CLIENT_ID,
+            "client_auth_method": "client_secret_post",
+            "client_secret": CLIENT_SECRET,
+        }
+        return config
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+        self.http_client = Mock(spec=["get_json"])
+        self.http_client.get_json.side_effect = get_json
+        self.http_client.user_agent = b"Synapse Test"
+
+        hs = self.setup_test_homeserver(proxied_http_client=self.http_client)
+
+        self.auth = hs.get_auth()
+
+        return hs
+
+    def _assertParams(self) -> None:
+        """Assert that the request parameters are correct."""
+        params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8"))
+        self.assertEqual(params["token"], ["mockAccessToken"])
+        self.assertEqual(params["client_id"], [CLIENT_ID])
+        self.assertEqual(params["client_secret"], [CLIENT_SECRET])
+
+    def test_inactive_token(self) -> None:
+        """The handler should return a 401 where the token is inactive."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={"active": False},
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+
+    def test_active_no_scope(self) -> None:
+        """The handler should return a 401 where no scope is given."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={"active": True},
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+
+    def test_active_user_no_subject(self) -> None:
+        """The handler should return a 401 when no subject is present."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={"active": True, "scope": " ".join([MATRIX_USER_SCOPE])},
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+
+    def test_active_no_user_scope(self) -> None:
+        """The handler should return a 401 when no user scope is present."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={
+                    "active": True,
+                    "sub": SUBJECT,
+                    "scope": " ".join([MATRIX_DEVICE_SCOPE]),
+                },
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+
+    def test_active_admin(self) -> None:
+        """The handler should return a requester with admin rights."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={
+                    "active": True,
+                    "sub": SUBJECT,
+                    "scope": " ".join([SYNAPSE_ADMIN_SCOPE]),
+                    "username": USERNAME,
+                },
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        requester = self.get_success(self.auth.get_user_by_req(request))
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+        self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+        self.assertEqual(requester.is_guest, False)
+        self.assertEqual(requester.device_id, None)
+        self.assertEqual(
+            get_awaitable_result(self.auth.is_server_admin(requester)), True
+        )
+
+    def test_active_admin_highest_privilege(self) -> None:
+        """The handler should resolve to the most permissive scope."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={
+                    "active": True,
+                    "sub": SUBJECT,
+                    "scope": " ".join(
+                        [SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE, MATRIX_GUEST_SCOPE]
+                    ),
+                    "username": USERNAME,
+                },
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        requester = self.get_success(self.auth.get_user_by_req(request))
self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), True + ) + + def test_active_user(self) -> None: + """The handler should return a requester with normal user rights.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + + def test_active_user_with_device(self) -> None: + """The handler should return a requester with normal user rights and a device ID.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + self.assertEqual(requester.device_id, DEVICE) + + def test_active_guest_with_device(self) -> None: + """The handler should return a requester with guest user rights and a device ID.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, True) + self.assertEqual( + 
get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + self.assertEqual(requester.device_id, DEVICE) -- cgit 1.5.1 From 5fe96082d09d1af3dc33b62b6a47a6baca02703c Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Thu, 17 Nov 2022 14:34:11 +0000 Subject: Actually enforce guest + return www-authenticate header --- synapse/api/auth/oauth_delegated.py | 18 ++++++++++++-- synapse/api/errors.py | 28 ++++++++++++++++++--- synapse/http/server.py | 6 +++++ tests/handlers/test_oauth_delegation.py | 43 ++++++++++++++++++++++++++++++--- 4 files changed, 87 insertions(+), 8 deletions(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index cfa178218c..9cb6eb7f79 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -25,7 +25,12 @@ from twisted.web.client import readBody from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth -from synapse.api.errors import AuthError, InvalidClientTokenError, StoreError +from synapse.api.errors import ( + AuthError, + InvalidClientTokenError, + OAuthInsufficientScopeError, + StoreError, +) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable from synapse.types import Requester, UserID, create_requester @@ -152,7 +157,16 @@ class OAuthDelegatedAuth(BaseAuth): allow_expired: bool = False, ) -> Requester: access_token = self.get_access_token_from_request(request) - return await self.get_user_by_access_token(access_token, allow_expired) + + # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission: + requester = await self.get_user_by_access_token(access_token, allow_expired) + + if not allow_guest and requester.is_guest: + raise OAuthInsufficientScopeError( + ["urn:matrix:org.matrix.msc2967.client:api:*"] + ) + + return requester async def get_user_by_access_token( self, diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 8c7c94b045..af894243f8 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -119,14 +119,20 @@ class Codes(str, Enum): class CodeMessageException(RuntimeError): - """An exception with integer code and message string attributes. + """An exception with integer code, a message string attributes and optional headers. Attributes: code: HTTP error code msg: string describing the error + headers: optional response headers to send """ - def __init__(self, code: Union[int, HTTPStatus], msg: str): + def __init__( + self, + code: Union[int, HTTPStatus], + msg: str, + headers: Optional[Dict[str, str]] = None, + ): super().__init__("%d: %s" % (code, msg)) # Some calls to this method pass instances of http.HTTPStatus for `code`. @@ -137,6 +143,7 @@ class CodeMessageException(RuntimeError): # To eliminate this behaviour, we convert them to their integer equivalents here. self.code = int(code) self.msg = msg + self.headers = headers class RedirectException(CodeMessageException): @@ -182,6 +189,7 @@ class SynapseError(CodeMessageException): msg: str, errcode: str = Codes.UNKNOWN, additional_fields: Optional[Dict] = None, + headers: Optional[Dict[str, str]] = None, ): """Constructs a synapse error. @@ -190,7 +198,7 @@ class SynapseError(CodeMessageException): msg: The human-readable error message. 
errcode: The matrix error code e.g 'M_FORBIDDEN' """ - super().__init__(code, msg) + super().__init__(code, msg, headers) self.errcode = errcode if additional_fields is None: self._additional_fields: Dict = {} @@ -335,6 +343,20 @@ class AuthError(SynapseError): super().__init__(code, msg, errcode, additional_fields) +class OAuthInsufficientScopeError(SynapseError): + """An error raised when the caller does not have sufficient scope to perform the requested action""" + + def __init__( + self, + required_scopes: List[str], + ): + headers = { + "WWW-Authenticate": 'Bearer error="insufficient_scope", scope="%s"' + % (" ".join(required_scopes)) + } + super().__init__(401, "Insufficient scope", Codes.FORBIDDEN, None, headers) + + class UnstableSpecAuthError(AuthError): """An error raised when a new error code is being proposed to replace a previous one. This error will return a "org.matrix.unstable.errcode" property with the new error code, diff --git a/synapse/http/server.py b/synapse/http/server.py index 101dc2e747..04768c6a23 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -111,6 +111,9 @@ def return_json_error( exc: SynapseError = f.value # type: ignore error_code = exc.code error_dict = exc.error_dict(config) + if exc.headers is not None: + for header, value in exc.headers.items(): + request.setHeader(header, value) logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED @@ -172,6 +175,9 @@ def return_html_error( cme: CodeMessageException = f.value # type: ignore code = cme.code msg = cme.msg + if cme.headers is not None: + for header, value in cme.headers.items(): + request.setHeader(header, value) if isinstance(cme, RedirectException): logger.info("%s redirect to %s", request, cme.location) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 54f4894819..bca9db1626 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -17,7 +17,8 @@ from urllib.parse import parse_qs from twisted.test.proto_helpers import MemoryReactor -from synapse.api.errors import InvalidClientTokenError +from synapse.api.errors import InvalidClientTokenError, OAuthInsufficientScopeError +from synapse.rest.client import devices from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -82,6 +83,10 @@ async def get_json(url: str) -> JsonDict: @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): + servlets = [ + devices.register_servlets, + ] + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL @@ -314,7 +319,37 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) - def test_active_guest_with_device(self) -> None: + def test_active_guest_not_allowed(self) -> None: + """The handler should return an insufficient scope error.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + error = self.get_failure( + self.auth.get_user_by_req(request), OAuthInsufficientScopeError + ) + 
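# The header assertion below checks the RFC 6750 challenge built by
# OAuthInsufficientScopeError earlier in this patch. A standalone sketch of
# that header construction (same format string, illustrative scope list):

required_scopes = ["urn:matrix:org.matrix.msc2967.client:api:*"]
www_authenticate = 'Bearer error="insufficient_scope", scope="%s"' % (
    " ".join(required_scopes)
)
assert www_authenticate == (
    'Bearer error="insufficient_scope",'
    ' scope="urn:matrix:org.matrix.msc2967.client:api:*"'
)
# Per RFC 6750, a client can read this challenge to learn which scope it is
# missing before retrying the request.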
self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual( + getattr(error.value, "headers", {})["WWW-Authenticate"], + 'Bearer error="insufficient_scope", scope="urn:matrix:org.matrix.msc2967.client:api:*"', + ) + + def test_active_guest_allowed(self) -> None: """The handler should return a requester with guest user rights and a device ID.""" self.http_client.request = simple_async_mock( @@ -331,7 +366,9 @@ class MSC3861OAuthDelegation(HomeserverTestCase): request = Mock(args={}) request.args[b"access_token"] = [b"mockAccessToken"] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success(self.auth.get_user_by_req(request)) + requester = self.get_success( + self.auth.get_user_by_req(request, allow_guest=True) + ) self.http_client.get_json.assert_called_once_with(WELL_KNOWN) self.http_client.request.assert_called_once_with( method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY -- cgit 1.5.1 From 31691d61511d41286272d779727502e396ce86eb Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 10 May 2023 16:08:43 +0200 Subject: Disable account related endpoints when using OAuth delegation --- synapse/handlers/auth.py | 8 +- synapse/rest/client/account.py | 24 +++-- synapse/rest/client/devices.py | 11 +- synapse/rest/client/keys.py | 30 +++++- synapse/rest/client/login.py | 3 + synapse/rest/client/logout.py | 3 + synapse/rest/client/register.py | 3 + tests/handlers/test_oauth_delegation.py | 180 +++++++++++++++++++++++++++++++- 8 files changed, 243 insertions(+), 19 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d001f2fb2f..a53984be33 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -274,6 +274,8 @@ class AuthHandler: # response. self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {} + self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + async def validate_user_via_ui_auth( self, requester: Requester, @@ -322,8 +324,12 @@ class AuthHandler: LimitExceededError if the ratelimiter's failed request count for this user is too high to proceed - """ + if self.oauth_delegation_enabled: + raise SynapseError( + HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861" + ) + if not requester.access_token_id: raise ValueError("Cannot validate a user without an access token") if can_skip_ui_auth and self._ui_auth_session_timeout: diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 3d0c55daa0..ccd1f7509c 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -27,6 +27,7 @@ from synapse.api.constants import LoginType from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, + NotFoundError, SynapseError, ThreepidValidationError, ) @@ -600,6 +601,9 @@ class ThreepidRestServlet(RestServlet): # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle # `threePidCreds` versus `three_pid_creds`. 
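# The guard added at the top of on_POST below makes a delegated homeserver
# answer as if the endpoint had never been registered. Roughly, a client
# sees a 404 with this body (hand-written illustration of the error dict,
# not captured output):

hidden_endpoint_response = {
    "errcode": "M_UNRECOGNIZED",  # Codes.UNRECOGNIZED
    "error": "Not found",  # NotFoundError's default message
}
assert hidden_endpoint_response["errcode"] == "M_UNRECOGNIZED"
# This matches an unknown route, so clients fall back cleanly instead of
# treating 3PID management as half-working.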
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + if self.hs.config.auth.oauth_delegation_enabled: + raise NotFoundError(errcode=Codes.UNRECOGNIZED) + if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN @@ -890,19 +894,21 @@ class AccountStatusRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: - EmailPasswordRequestTokenRestServlet(hs).register(http_server) - PasswordRestServlet(hs).register(http_server) - DeactivateAccountRestServlet(hs).register(http_server) - EmailThreepidRequestTokenRestServlet(hs).register(http_server) - MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) - AddThreepidEmailSubmitTokenServlet(hs).register(http_server) - AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) + if not hs.config.auth.oauth_delegation_enabled: + EmailPasswordRequestTokenRestServlet(hs).register(http_server) + DeactivateAccountRestServlet(hs).register(http_server) + PasswordRestServlet(hs).register(http_server) + EmailThreepidRequestTokenRestServlet(hs).register(http_server) + MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) + AddThreepidEmailSubmitTokenServlet(hs).register(http_server) + AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None: - ThreepidAddRestServlet(hs).register(http_server) ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) - ThreepidDeleteRestServlet(hs).register(http_server) + if not hs.config.auth.oauth_delegation_enabled: + ThreepidAddRestServlet(hs).register(http_server) + ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled: diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index e97d0bf475..00e9bff43f 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple from pydantic import Extra, StrictStr from synapse.api import errors -from synapse.api.errors import NotFoundError +from synapse.api.errors import NotFoundError, UnrecognizedRequestError from synapse.handlers.device import DeviceHandler from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -135,6 +135,7 @@ class DeviceRestServlet(RestServlet): self.device_handler = handler self.auth_handler = hs.get_auth_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled + self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled async def on_GET( self, request: SynapseRequest, device_id: str @@ -166,6 +167,9 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: + if self.oauth_delegation_enabled: + raise UnrecognizedRequestError(code=404) + requester = await self.auth.get_user_by_req(request) try: @@ -344,7 +348,10 @@ class ClaimDehydratedDeviceServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.worker.worker_app is None: + if ( + hs.config.worker.worker_app is None + and not hs.config.auth.oauth_delegation_enabled + ): DeleteDevicesRestServlet(hs).register(http_server) 
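# DeleteDevicesRestServlet above is now doubly gated. As a sketch (flag
# names as used in this diff), the registration decision reduces to:

def registers_delete_devices(worker_app: "str | None", oauth_delegated: bool) -> bool:
    # Only register on the main process, and only when delegation is off.
    return worker_app is None and not oauth_delegated

assert registers_delete_devices(None, False)  # main process, no delegation
assert not registers_delete_devices(None, True)  # delegation removes the endpoint
assert not registers_delete_devices("synapse.app.generic_worker", False)  # worker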
DevicesRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 413edd8a4d..c3ca83c0c8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -17,9 +17,10 @@ import logging import re from collections import Counter +from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple -from synapse.api.errors import InvalidAPICallError, SynapseError +from synapse.api.errors import Codes, InvalidAPICallError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -375,9 +376,29 @@ class SigningKeyUploadServlet(RestServlet): user_id = requester.user.to_string() body = parse_json_object_from_request(request) - if self.hs.config.experimental.msc3967_enabled: - if await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id): - # If we already have a master key then cross signing is set up and we require UIA to reset + is_cross_signing_setup = ( + await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id) + ) + + # Before MSC3967 we required UIA both when setting up cross signing for the + # first time and when resetting the device signing key. With MSC3967 we only + # require UIA when resetting cross-signing, and not when setting up the first + # time. Because there is no UIA in MSC3861, for now we throw an error if the + # user tries to reset the device signing key when MSC3861 is enabled, but allow + # first-time setup. + if self.hs.config.auth.oauth_delegation_enabled: + # There is no way to reset the device signing key with MSC3861 + if is_cross_signing_setup: + raise SynapseError( + HTTPStatus.NOT_IMPLEMENTED, + "Resetting cross signing keys is not yet supported with MSC3861", + Codes.UNRECOGNIZED, + ) + # But first-time setup is fine + + elif self.hs.config.experimental.msc3967_enabled: + # If we already have a master key then cross signing is set up and we require UIA to reset + if is_cross_signing_setup: await self.auth_handler.validate_user_via_ui_auth( requester, request, @@ -387,6 +408,7 @@ class SigningKeyUploadServlet(RestServlet): can_skip_ui_auth=False, ) # Otherwise we don't require UIA since we are setting up cross signing for first time + else: # Previous behaviour is to always require UIA but allow it to be skipped await self.auth_handler.validate_user_via_ui_auth( diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 6ca61ffbd0..4d0eabcb84 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -633,6 +633,9 @@ class CasTicketServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + LoginRestServlet(hs).register(http_server) if ( hs.config.worker.worker_app is None diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 6d34625ad5..b64a6d5961 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -80,5 +80,8 @@ class LogoutAllRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + LogoutRestServlet(hs).register(http_server) LogoutAllRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 7f84a17e29..6866988c38 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -955,6 +955,9 
@@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + if hs.config.worker.worker_app is None: EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index bca9db1626..ee1bc5ca7a 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -11,14 +11,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict + +from http import HTTPStatus +from typing import Any, Dict, Union from unittest.mock import ANY, Mock from urllib.parse import parse_qs +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) +from signedjson.sign import sign_json + from twisted.test.proto_helpers import MemoryReactor -from synapse.api.errors import InvalidClientTokenError, OAuthInsufficientScopeError -from synapse.rest.client import devices +from synapse.api.errors import ( + Codes, + InvalidClientTokenError, + OAuthInsufficientScopeError, +) +from synapse.rest.client import account, devices, keys, login, logout, register from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -57,6 +70,7 @@ DEVICE = "AABBCCDD" MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE SUBJECT = "abc-def-ghi" USERNAME = "test-user" +USER_ID = "@" + USERNAME + ":" + SERVER_NAME async def get_json(url: str) -> JsonDict: @@ -84,7 +98,12 @@ async def get_json(url: str) -> JsonDict: @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): servlets = [ + account.register_servlets, devices.register_servlets, + keys.register_servlets, + register.register_servlets, + login.register_servlets, + logout.register_servlets, ] def default_config(self) -> Dict[str, Any]: @@ -380,3 +399,158 @@ class MSC3861OAuthDelegation(HomeserverTestCase): get_awaitable_result(self.auth.is_server_admin(requester)), False ) self.assertEqual(requester.device_id, DEVICE) + + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: + # We only generate a master key to simplify the test. 
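# For reference, the same signedjson calls used in the helper below can be
# exercised standalone (hypothetical user and device IDs; signedjson is an
# existing Synapse dependency):

from signedjson.key import (
    encode_verify_key_base64,
    generate_signing_key,
    get_verify_key,
)
from signedjson.sign import sign_json

signing_key = generate_signing_key("AABBCCDD")
verify_key = encode_verify_key_base64(get_verify_key(signing_key))
master_key = sign_json(
    {
        "user_id": "@test-user:test",
        "usage": ["master"],
        "keys": {"ed25519:" + verify_key: verify_key},
    },
    "@test-user:test",
    signing_key,
)
# sign_json() hands back the same dict with a "signatures" section added,
# keyed by the signing entity and "ed25519:<key version>":
assert "ed25519:AABBCCDD" in master_key["signatures"]["@test-user:test"]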
+ master_signing_key = generate_signing_key(device_id) + master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key)) + + return { + "master_key": sign_json( + { + "user_id": user_id, + "usage": ["master"], + "keys": {"ed25519:" + master_verify_key: master_verify_key}, + }, + user_id, + master_signing_key, + ), + } + + def test_cross_signing(self) -> None: + """Try uploading device keys with OAuth delegation enabled.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + keys_upload_body = self.make_device_keys(USER_ID, DEVICE) + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys_upload_body, + access_token="mockAccessToken", + ) + + self.assertEqual(channel.code, 200, channel.json_body) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys_upload_body, + access_token="mockAccessToken", + ) + + self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body) + + def expect_unauthorized( + self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + ) -> None: + channel = self.make_request(method, path, content, shorthand=False) + + self.assertEqual(channel.code, 401, channel.json_body) + + def expect_unrecognized( + self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + ) -> None: + channel = self.make_request(method, path, content) + + self.assertEqual(channel.code, 404, channel.json_body) + self.assertEqual( + channel.json_body["errcode"], Codes.UNRECOGNIZED, channel.json_body + ) + + def test_uia_endpoints(self) -> None: + """Test that endpoints that were removed in MSC2964 are no longer available.""" + + # This is just an endpoint that should remain visible (but requires auth): + self.expect_unauthorized("GET", "/_matrix/client/v3/devices") + + # This remains usable, but will require a uia scope: + self.expect_unauthorized( + "POST", "/_matrix/client/v3/keys/device_signing/upload" + ) + + def test_3pid_endpoints(self) -> None: + """Test that 3pid account management endpoints that were removed in MSC2964 are no longer available.""" + + # Remains and requires auth: + self.expect_unauthorized("GET", "/_matrix/client/v3/account/3pid") + self.expect_unauthorized( + "POST", + "/_matrix/client/v3/account/3pid/bind", + { + "client_secret": "foo", + "id_access_token": "bar", + "id_server": "foo", + "sid": "bar", + }, + ) + self.expect_unauthorized("POST", "/_matrix/client/v3/account/3pid/unbind", {}) + + # These are gone: + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid" + ) # deprecated + self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/add") + self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/delete") + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid/email/requestToken" + ) + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid/msisdn/requestToken" + ) + + def test_account_management_endpoints_removed(self) -> None: + """Test that account management endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("POST", "/_matrix/client/v3/account/deactivate") + self.expect_unrecognized("POST", "/_matrix/client/v3/account/password") + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/password/email/requestToken" + 
) + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/password/msisdn/requestToken" + ) + + def test_registration_endpoints_removed(self) -> None: + """Test that registration endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized( + "GET", "/_matrix/client/v1/register/m.login.registration_token/validity" + ) + self.expect_unrecognized("POST", "/_matrix/client/v3/register") + self.expect_unrecognized("GET", "/_matrix/client/v3/register") + self.expect_unrecognized("GET", "/_matrix/client/v3/register/available") + self.expect_unrecognized( + "POST", "/_matrix/client/v3/register/email/requestToken" + ) + self.expect_unrecognized( + "POST", "/_matrix/client/v3/register/msisdn/requestToken" + ) + + def test_session_management_endpoints_removed(self) -> None: + """Test that session management endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("GET", "/_matrix/client/v3/login") + self.expect_unrecognized("POST", "/_matrix/client/v3/login") + self.expect_unrecognized("GET", "/_matrix/client/v3/login/sso/redirect") + self.expect_unrecognized("POST", "/_matrix/client/v3/logout") + self.expect_unrecognized("POST", "/_matrix/client/v3/logout/all") + self.expect_unrecognized("POST", "/_matrix/client/v3/refresh") + self.expect_unrecognized("GET", "/_matrix/static/client/login") + + def test_device_management_endpoints_removed(self) -> None: + """Test that device management endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices") + self.expect_unrecognized("DELETE", "/_matrix/client/v3/devices/{DEVICE}") + + def test_openid_endpoints_removed(self) -> None: + """Test that OpenID id_token endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized( + "POST", "/_matrix/client/v3/user/{USERNAME}/openid/request_token" + ) -- cgit 1.5.1 From 03920bdd4e9390d74762ecd923ddf0d6c75d222e Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Mon, 6 Feb 2023 17:12:42 +0000 Subject: Test MSC2965 implementation: well-known discovery document --- tests/rest/test_well_known.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 2091b08d89..34333d88df 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -17,6 +17,13 @@ from synapse.rest.well_known import well_known_resource from tests import unittest +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + class WellKnownTests(unittest.HomeserverTestCase): def create_test_resource(self) -> Resource: @@ -96,3 +103,34 @@ class WellKnownTests(unittest.HomeserverTestCase): "GET", "/.well-known/matrix/server", shorthand=False ) self.assertEqual(channel.code, 404) + + @unittest.skip_unless(HAS_AUTHLIB, "requires authlib") + @unittest.override_config( + { + "public_baseurl": "https://homeserver", # this is only required so that client well known is served + "oauth_delegation": { + "enabled": True, + "issuer": "https://issuer", + "account": "https://my-account.issuer", + "client_id": "id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + }, + } + ) + def test_client_well_known_msc3861_oauth_delegation(self) -> None: + channel = self.make_request( + "GET", "/.well-known/matrix/client", shorthand=False + ) + + self.assertEqual(channel.code, 200) + self.assertEqual( + 
channel.json_body, + { + "m.homeserver": {"base_url": "https://homeserver/"}, + "org.matrix.msc2965.authentication": { + "issuer": "https://issuer", + "account": "https://my-account.issuer", + }, + }, + ) -- cgit 1.5.1 From 249f4a338dde0c1bcde5e14121d8d9fa156f185f Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Tue, 9 May 2023 16:20:04 +0200 Subject: Refactor config to be an experimental feature Also enforce you can't combine it with incompatible config options --- synapse/api/auth/msc3861_delegated.py | 280 ++++++++++++++++++++++++++++++++ synapse/api/auth/oauth_delegated.py | 277 ------------------------------- synapse/config/auth.py | 39 +---- synapse/config/experimental.py | 193 +++++++++++++++++++++- synapse/handlers/auth.py | 4 +- synapse/module_api/__init__.py | 7 + synapse/rest/client/account.py | 6 +- synapse/rest/client/devices.py | 6 +- synapse/rest/client/keys.py | 2 +- synapse/rest/client/login.py | 2 +- synapse/rest/client/logout.py | 2 +- synapse/rest/client/register.py | 2 +- synapse/rest/synapse/client/__init__.py | 2 +- synapse/rest/synapse/client/jwks.py | 8 +- synapse/rest/well_known.py | 9 +- synapse/server.py | 6 +- tests/config/test_oauth_delegation.py | 202 +++++++++++++++++++++++ tests/handlers/test_oauth_delegation.py | 15 +- tests/rest/test_well_known.py | 17 +- 19 files changed, 731 insertions(+), 348 deletions(-) create mode 100644 synapse/api/auth/msc3861_delegated.py delete mode 100644 synapse/api/auth/oauth_delegated.py create mode 100644 tests/config/test_oauth_delegation.py diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py new file mode 100644 index 0000000000..4ca3280bd3 --- /dev/null +++ b/synapse/api/auth/msc3861_delegated.py @@ -0,0 +1,280 @@ +# Copyright 2023 The Matrix.org Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
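# The well-known test above fixes the MSC2965 discovery document shape. As a
# small usage sketch (same illustrative issuer values), a client picks the
# OIDC issuer and account-management UI out of /.well-known/matrix/client:

client_well_known = {
    "m.homeserver": {"base_url": "https://homeserver/"},
    "org.matrix.msc2965.authentication": {
        "issuer": "https://issuer",
        "account": "https://my-account.issuer",
    },
}
msc2965 = client_well_known["org.matrix.msc2965.authentication"]
assert msc2965["issuer"] == "https://issuer"  # where to authenticate
assert msc2965["account"] == "https://my-account.issuer"  # account management UI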
+import logging +from typing import TYPE_CHECKING, Any, Dict, List, Optional +from urllib.parse import urlencode + +from authlib.oauth2 import ClientAuth +from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post +from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign +from authlib.oauth2.rfc7662 import IntrospectionToken +from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url + +from twisted.web.client import readBody +from twisted.web.http_headers import Headers + +from synapse.api.auth.base import BaseAuth +from synapse.api.errors import ( + AuthError, + InvalidClientTokenError, + OAuthInsufficientScopeError, + StoreError, +) +from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable +from synapse.types import Requester, UserID, create_requester +from synapse.util import json_decoder +from synapse.util.caches.cached_call import RetryOnExceptionCachedCall + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +def scope_to_list(scope: str) -> List[str]: + """Convert a scope string to a list of scope tokens""" + return scope.strip().split(" ") + + +class PrivateKeyJWTWithKid(PrivateKeyJWT): + """An implementation of the private_key_jwt client auth method that includes a kid header. + + This is needed because some providers (Keycloak) require the kid header to figure + out which key to use to verify the signature. + """ + + def sign(self, auth: Any, token_endpoint: str) -> bytes: + return private_key_jwt_sign( + auth.client_secret, + client_id=auth.client_id, + token_endpoint=token_endpoint, + claims=self.claims, + header={"kid": auth.client_secret["kid"]}, + ) + + +class MSC3861DelegatedAuth(BaseAuth): + AUTH_METHODS = { + "client_secret_post": encode_client_secret_post, + "client_secret_basic": encode_client_secret_basic, + "client_secret_jwt": ClientSecretJWT(), + "private_key_jwt": PrivateKeyJWTWithKid(), + } + + EXTERNAL_ID_PROVIDER = "oauth-delegated" + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self._config = hs.config.experimental.msc3861 + auth_method = MSC3861DelegatedAuth.AUTH_METHODS.get( + self._config.client_auth_method.value, None + ) + # Those assertions are already checked when parsing the config + assert self._config.enabled, "OAuth delegation is not enabled" + assert self._config.issuer, "No issuer provided" + assert self._config.client_id, "No client_id provided" + assert auth_method is not None, "Invalid client_auth_method provided" + + self._http_client = hs.get_proxied_http_client() + self._hostname = hs.hostname + + self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + + if isinstance(auth_method, PrivateKeyJWTWithKid): + # Use the JWK as the client secret when using the private_key_jwt method + assert self._config.jwk, "No JWK provided" + self._client_auth = ClientAuth( + self._config.client_id, self._config.jwk, auth_method + ) + else: + # Else use the client secret + assert self._config.client_secret, "No client_secret provided" + self._client_auth = ClientAuth( + self._config.client_id, self._config.client_secret, auth_method + ) + + async def _load_metadata(self) -> OpenIDProviderMetadata: + if self._config.issuer_metadata is not None: + return OpenIDProviderMetadata(**self._config.issuer_metadata) + url = get_well_known_url(self._config.issuer, external=True) + response = await self._http_client.get_json(url) + metadata = 
OpenIDProviderMetadata(**response) + # metadata.validate_introspection_endpoint() + return metadata + + async def _introspect_token(self, token: str) -> IntrospectionToken: + metadata = await self._issuer_metadata.get() + introspection_endpoint = metadata.get("introspection_endpoint") + raw_headers: Dict[str, str] = { + "Content-Type": "application/x-www-form-urlencoded", + "User-Agent": str(self._http_client.user_agent, "utf-8"), + "Accept": "application/json", + } + + args = {"token": token, "token_type_hint": "access_token"} + body = urlencode(args, True) + + # Fill the body/headers with credentials + uri, raw_headers, body = self._client_auth.prepare( + method="POST", uri=introspection_endpoint, headers=raw_headers, body=body + ) + headers = Headers({k: [v] for (k, v) in raw_headers.items()}) + + # Do the actual request + # We're not using the SimpleHttpClient util methods as we don't want to + # check the HTTP status code and we do the body encoding ourself. + response = await self._http_client.request( + method="POST", + uri=uri, + data=body.encode("utf-8"), + headers=headers, + ) + + resp_body = await make_deferred_yieldable(readBody(response)) + # TODO: Let's not worry about 5xx errors & co. for now and just try + # decoding that as JSON. We should also do some validation of the + # response + resp = json_decoder.decode(resp_body.decode("utf-8")) + return IntrospectionToken(**resp) + + async def is_server_admin(self, requester: Requester) -> bool: + return "urn:synapse:admin:*" in requester.scope + + async def get_user_by_req( + self, + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + ) -> Requester: + access_token = self.get_access_token_from_request(request) + + # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission: + requester = await self.get_user_by_access_token(access_token, allow_expired) + + if not allow_guest and requester.is_guest: + raise OAuthInsufficientScopeError( + ["urn:matrix:org.matrix.msc2967.client:api:*"] + ) + + return requester + + async def get_user_by_access_token( + self, + token: str, + allow_expired: bool = False, + ) -> Requester: + introspection_result = await self._introspect_token(token) + + logger.info(f"Introspection result: {introspection_result!r}") + + # TODO: introspection verification should be more extensive, especially: + # - verify the audience + if not introspection_result.get("active"): + raise InvalidClientTokenError("Token is not active") + + # Let's look at the scope + scope: List[str] = scope_to_list(introspection_result.get("scope", "")) + + # Determine type of user based on presence of particular scopes + has_admin_scope = "urn:synapse:admin:*" in scope + has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope + has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope + is_user = has_user_scope or has_admin_scope + is_guest = has_guest_scope and not is_user + + if not is_user and not is_guest: + raise InvalidClientTokenError("No scope in token granting user rights") + + # Match via the sub claim + sub: Optional[str] = introspection_result.get("sub") + if sub is None: + raise InvalidClientTokenError( + "Invalid sub claim in the introspection result" + ) + + user_id_str = await self.store.get_user_by_external_id( + MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub + ) + if user_id_str is None: + # If we could not find a user via the external_id, it either does not exist, + # or the 
external_id was never recorded + + # TODO: claim mapping should be configurable + username: Optional[str] = introspection_result.get("username") + if username is None or not isinstance(username, str): + raise AuthError( + 500, + "Invalid username claim in the introspection result", + ) + user_id = UserID(username, self._hostname) + + # First try to find a user from the username claim + user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string()) + if user_info is None: + # If the user does not exist, we should create it on the fly + # TODO: we could use SCIM to provision users ahead of time and listen + # for SCIM SET events if those ever become standard: + # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 + + # TODO: claim mapping should be configurable + # If present, use the name claim as the displayname + name: Optional[str] = introspection_result.get("name") + + await self.store.register_user( + user_id=user_id.to_string(), create_profile_with_displayname=name + ) + + # And record the sub as external_id + await self.store.record_user_external_id( + MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string() + ) + else: + user_id = UserID.from_string(user_id_str) + + # Find device_id in scope + device_id = None + for tok in scope: + if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): + parts = tok.split(":") + if len(parts) == 5: + device_id = parts[4] + + if device_id: + # Create the device on the fly if it does not exist + try: + await self.store.get_device( + user_id=user_id.to_string(), device_id=device_id + ) + except StoreError: + await self.store.store_device( + user_id=user_id.to_string(), + device_id=device_id, + initial_device_display_name="OIDC-native client", + ) + + # TODO: there is a few things missing in the requester here, which still need + # to be figured out, like: + # - impersonation, with the `authenticated_entity`, which is used for + # rate-limiting, MAU limits, etc. + # - shadow-banning, with the `shadow_banned` flag + # - a proper solution for appservices, which still needs to be figured out in + # the context of MSC3861 + return create_requester( + user_id=user_id, + device_id=device_id, + scope=scope, + is_guest=is_guest, + ) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py deleted file mode 100644 index 9cb6eb7f79..0000000000 --- a/synapse/api/auth/oauth_delegated.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
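# The device handling near the end of the new module above derives the device
# ID from an MSC2967 device scope token. The same split-on-":" parse, as a
# standalone sketch:

from typing import List, Optional

def device_id_from_scope(tokens: List[str]) -> Optional[str]:
    device_id = None
    for tok in tokens:
        if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"):
            # A well-formed token splits into exactly five colon-separated
            # parts; the last one is the device ID.
            parts = tok.split(":")
            if len(parts) == 5:
                device_id = parts[4]
    return device_id

assert (
    device_id_from_scope(["urn:matrix:org.matrix.msc2967.client:device:AABBCCDD"])
    == "AABBCCDD"
)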
-import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional -from urllib.parse import urlencode - -from authlib.oauth2 import ClientAuth -from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post -from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign -from authlib.oauth2.rfc7662 import IntrospectionToken -from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url - -from twisted.web.client import readBody -from twisted.web.http_headers import Headers - -from synapse.api.auth.base import BaseAuth -from synapse.api.errors import ( - AuthError, - InvalidClientTokenError, - OAuthInsufficientScopeError, - StoreError, -) -from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable -from synapse.types import Requester, UserID, create_requester -from synapse.util import json_decoder -from synapse.util.caches.cached_call import RetryOnExceptionCachedCall - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -def scope_to_list(scope: str) -> List[str]: - """Convert a scope string to a list of scope tokens""" - return scope.strip().split(" ") - - -class PrivateKeyJWTWithKid(PrivateKeyJWT): - """An implementation of the private_key_jwt client auth method that includes a kid header. - - This is needed because some providers (Keycloak) require the kid header to figure - out which key to use to verify the signature. - """ - - def sign(self, auth: Any, token_endpoint: str) -> bytes: - return private_key_jwt_sign( - auth.client_secret, - client_id=auth.client_id, - token_endpoint=token_endpoint, - claims=self.claims, - header={"kid": auth.client_secret["kid"]}, - ) - - -class OAuthDelegatedAuth(BaseAuth): - AUTH_METHODS = { - "client_secret_post": encode_client_secret_post, - "client_secret_basic": encode_client_secret_basic, - "client_secret_jwt": ClientSecretJWT(), - "private_key_jwt": PrivateKeyJWTWithKid(), - } - - EXTERNAL_ID_PROVIDER = "oauth-delegated" - - def __init__(self, hs: "HomeServer"): - super().__init__(hs) - - self._config = hs.config.auth - assert self._config.oauth_delegation_enabled, "OAuth delegation is not enabled" - assert self._config.oauth_delegation_issuer, "No issuer provided" - assert self._config.oauth_delegation_client_id, "No client_id provided" - assert self._config.oauth_delegation_client_secret, "No client_secret provided" - assert ( - self._config.oauth_delegation_client_auth_method - in OAuthDelegatedAuth.AUTH_METHODS - ), "Invalid client_auth_method" - - self._http_client = hs.get_proxied_http_client() - self._hostname = hs.hostname - - self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) - secret = self._config.oauth_delegation_client_secret - self._client_auth = ClientAuth( - self._config.oauth_delegation_client_id, - secret, - OAuthDelegatedAuth.AUTH_METHODS[ - self._config.oauth_delegation_client_auth_method - ], - ) - - async def _load_metadata(self) -> OpenIDProviderMetadata: - if self._config.oauth_delegation_issuer_metadata is not None: - return OpenIDProviderMetadata( - **self._config.oauth_delegation_issuer_metadata - ) - url = get_well_known_url(self._config.oauth_delegation_issuer, external=True) - response = await self._http_client.get_json(url) - metadata = OpenIDProviderMetadata(**response) - # metadata.validate_introspection_endpoint() - return metadata - - async def _introspect_token(self, token: str) -> IntrospectionToken: - metadata = await 
self._issuer_metadata.get() - introspection_endpoint = metadata.get("introspection_endpoint") - raw_headers: Dict[str, str] = { - "Content-Type": "application/x-www-form-urlencoded", - "User-Agent": str(self._http_client.user_agent, "utf-8"), - "Accept": "application/json", - } - - args = {"token": token, "token_type_hint": "access_token"} - body = urlencode(args, True) - - # Fill the body/headers with credentials - uri, raw_headers, body = self._client_auth.prepare( - method="POST", uri=introspection_endpoint, headers=raw_headers, body=body - ) - headers = Headers({k: [v] for (k, v) in raw_headers.items()}) - - # Do the actual request - # We're not using the SimpleHttpClient util methods as we don't want to - # check the HTTP status code and we do the body encoding ourself. - response = await self._http_client.request( - method="POST", - uri=uri, - data=body.encode("utf-8"), - headers=headers, - ) - - resp_body = await make_deferred_yieldable(readBody(response)) - # TODO: Let's not worry about 5xx errors & co. for now and just try - # decoding that as JSON. We should also do some validation of the - # response - resp = json_decoder.decode(resp_body.decode("utf-8")) - return IntrospectionToken(**resp) - - async def is_server_admin(self, requester: Requester) -> bool: - return "urn:synapse:admin:*" in requester.scope - - async def get_user_by_req( - self, - request: SynapseRequest, - allow_guest: bool = False, - allow_expired: bool = False, - ) -> Requester: - access_token = self.get_access_token_from_request(request) - - # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission: - requester = await self.get_user_by_access_token(access_token, allow_expired) - - if not allow_guest and requester.is_guest: - raise OAuthInsufficientScopeError( - ["urn:matrix:org.matrix.msc2967.client:api:*"] - ) - - return requester - - async def get_user_by_access_token( - self, - token: str, - allow_expired: bool = False, - ) -> Requester: - introspection_result = await self._introspect_token(token) - - logger.info(f"Introspection result: {introspection_result!r}") - - # TODO: introspection verification should be more extensive, especially: - # - verify the audience - if not introspection_result.get("active"): - raise InvalidClientTokenError("Token is not active") - - # Let's look at the scope - scope: List[str] = scope_to_list(introspection_result.get("scope", "")) - - # Determine type of user based on presence of particular scopes - has_admin_scope = "urn:synapse:admin:*" in scope - has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope - has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope - is_user = has_user_scope or has_admin_scope - is_guest = has_guest_scope and not is_user - - if not is_user and not is_guest: - raise InvalidClientTokenError("No scope in token granting user rights") - - # Match via the sub claim - sub: Optional[str] = introspection_result.get("sub") - if sub is None: - raise InvalidClientTokenError( - "Invalid sub claim in the introspection result" - ) - - user_id_str = await self.store.get_user_by_external_id( - OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub - ) - if user_id_str is None: - # If we could not find a user via the external_id, it either does not exist, - # or the external_id was never recorded - - # TODO: claim mapping should be configurable - username: Optional[str] = introspection_result.get("username") - if username is None or not 
isinstance(username, str): - raise AuthError( - 500, - "Invalid username claim in the introspection result", - ) - user_id = UserID(username, self._hostname) - - # First try to find a user from the username claim - user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string()) - if user_info is None: - # If the user does not exist, we should create it on the fly - # TODO: we could use SCIM to provision users ahead of time and listen - # for SCIM SET events if those ever become standard: - # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 - - # TODO: claim mapping should be configurable - # If present, use the name claim as the displayname - name: Optional[str] = introspection_result.get("name") - - await self.store.register_user( - user_id=user_id.to_string(), create_profile_with_displayname=name - ) - - # And record the sub as external_id - await self.store.record_user_external_id( - OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string() - ) - else: - user_id = UserID.from_string(user_id_str) - - # Find device_id in scope - device_id = None - for tok in scope: - if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): - parts = tok.split(":") - if len(parts) == 5: - device_id = parts[4] - - if device_id: - # Create the device on the fly if it does not exist - try: - await self.store.get_device( - user_id=user_id.to_string(), device_id=device_id - ) - except StoreError: - await self.store.store_device( - user_id=user_id.to_string(), - device_id=device_id, - initial_device_display_name="OIDC-native client", - ) - - # TODO: there is a few things missing in the requester here, which still need - # to be figured out, like: - # - impersonation, with the `authenticated_entity`, which is used for - # rate-limiting, MAU limits, etc. - # - shadow-banning, with the `shadow_banned` flag - # - a proper solution for appservices, which still needs to be figured out in - # the context of MSC3861 - return create_requester( - user_id=user_id, - device_id=device_id, - scope=scope, - is_guest=is_guest, - ) diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 25b5cc60dc..12e853980e 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -14,11 +14,9 @@ # limitations under the License. from typing import Any -from authlib.jose.rfc7517 import JsonWebKey - from synapse.types import JsonDict -from ._base import Config, ConfigError +from ._base import Config class AuthConfig(Config): @@ -31,7 +29,14 @@ class AuthConfig(Config): if password_config is None: password_config = {} - passwords_enabled = password_config.get("enabled", True) + # The default value of password_config.enabled is True, unless msc3861 is enabled. + msc3861_enabled = ( + config.get("experimental_features", {}) + .get("msc3861", {}) + .get("enabled", False) + ) + passwords_enabled = password_config.get("enabled", not msc3861_enabled) + # 'only_for_reauth' allows users who have previously set a password to use it, # even though passwords would otherwise be disabled. 
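# A condensed sketch of the defaults the two lines around this note compute
# (illustrative, not exhaustive):
#
#     password_config.enabled   msc3861 enabled   result
#     unset                     no                password login allowed
#     unset                     yes               password login disabled
#     "only_for_reauth"         either            reauth only, no new logins
#     false                     either            password auth fully disabled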
passwords_for_reauth_only = passwords_enabled == "only_for_reauth" @@ -55,29 +60,3 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) - - oauth_delegation = config.get("oauth_delegation", {}) - self.oauth_delegation_enabled = oauth_delegation.get("enabled", False) - self.oauth_delegation_issuer = oauth_delegation.get("issuer", "") - self.oauth_delegation_issuer_metadata = oauth_delegation.get("issuer_metadata") - self.oauth_delegation_account = oauth_delegation.get("account", "") - self.oauth_delegation_client_id = oauth_delegation.get("client_id", "") - self.oauth_delegation_client_secret = oauth_delegation.get("client_secret", "") - self.oauth_delegation_client_auth_method = oauth_delegation.get( - "client_auth_method", "client_secret_post" - ) - - self.password_enabled = password_config.get( - "enabled", not self.oauth_delegation_enabled - ) - - if self.oauth_delegation_client_auth_method == "private_key_jwt": - self.oauth_delegation_client_secret = JsonWebKey.import_key( - self.oauth_delegation_client_secret - ) - - # If we are delegating via OAuth then password cannot be supported as well - if self.oauth_delegation_enabled and self.password_enabled: - raise ConfigError( - "Password auth cannot be enabled when OAuth delegation is enabled" - ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d769b7f668..b9607975f9 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -12,15 +12,196 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Optional +import enum +from typing import TYPE_CHECKING, Any, Optional import attr +import attr.validators from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config import ConfigError -from synapse.config._base import Config +from synapse.config._base import Config, RootConfig from synapse.types import JsonDict +# Determine whether authlib is installed. +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + +if TYPE_CHECKING: + # Only import this if we're type checking, as it might not be installed at runtime. + from authlib.jose.rfc7517 import JsonWebKey + + +class ClientAuthMethod(enum.Enum): + """List of supported client auth methods.""" + + CLIENT_SECRET_POST = "client_secret_post" + CLIENT_SECRET_BASIC = "client_secret_basic" + CLIENT_SECRET_JWT = "client_secret_jwt" + PRIVATE_KEY_JWT = "private_key_jwt" + + +def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]: + """A helper function to parse a JWK dict into a JsonWebKey.""" + + if jwks is None: + return None + + from authlib.jose.rfc7517 import JsonWebKey + + return JsonWebKey.import_key(jwks) + + +@attr.s(slots=True, frozen=True) +class MSC3861: + """Configuration for MSC3861: Matrix architecture change to delegate authentication via OIDC""" + + enabled: bool = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + """Whether to enable MSC3861 auth delegation.""" + + @enabled.validator + def _check_enabled(self, attribute: attr.Attribute, value: bool) -> None: + # Only allow enabling MSC3861 if authlib is installed + if value and not HAS_AUTHLIB: + raise ConfigError( + "MSC3861 is enabled but authlib is not installed. " + "Please install authlib to use MSC3861." 
+ ) + + issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str)) + """The URL of the OIDC Provider.""" + + issuer_metadata: Optional[JsonDict] = attr.ib(default=None) + """The issuer metadata to use, otherwise discovered from /.well-known/openid-configuration as per MSC2965.""" + + client_id: str = attr.ib( + default="", + validator=attr.validators.instance_of(str), + ) + """The client ID to use when calling the introspection endpoint.""" + + client_auth_method: ClientAuthMethod = attr.ib( + default=ClientAuthMethod.CLIENT_SECRET_POST, converter=ClientAuthMethod + ) + """The auth method used when calling the introspection endpoint.""" + + client_secret: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """ + The client secret to use when calling the introspection endpoint, + when using any of the client_secret_* client auth methods. + """ + + jwk: Optional["JsonWebKey"] = attr.ib(default=None, converter=_parse_jwks) + """ + The JWKS to use when calling the introspection endpoint, + when using the private_key_jwt client auth method. + """ + + @client_auth_method.validator + def _check_client_auth_method( + self, attribute: attr.Attribute, value: ClientAuthMethod + ) -> None: + # Check that the right client credentials are provided for the client auth method. + if not self.enabled: + return + + if value == ClientAuthMethod.PRIVATE_KEY_JWT and self.jwk is None: + raise ConfigError( + "A JWKS must be provided when using the private_key_jwt client auth method" + ) + + if ( + value + in ( + ClientAuthMethod.CLIENT_SECRET_POST, + ClientAuthMethod.CLIENT_SECRET_BASIC, + ClientAuthMethod.CLIENT_SECRET_JWT, + ) + and self.client_secret is None + ): + raise ConfigError( + f"A client secret must be provided when using the {value} client auth method" + ) + + account_management_url: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """The URL of the My Account page on the OIDC Provider as per MSC2965.""" + + def check_config_conflicts(self, root: RootConfig) -> None: + """Checks for any configuration conflicts with other parts of Synapse. + + Raises: + ConfigError: If there are any configuration conflicts. 
+ """ + + if not self.enabled: + return + + if ( + root.auth.password_enabled_for_reauth + or root.auth.password_enabled_for_login + ): + raise ConfigError( + "Password auth cannot be enabled when OAuth delegation is enabled" + ) + + if root.registration.enable_registration: + raise ConfigError( + "Registration cannot be enabled when OAuth delegation is enabled" + ) + + if ( + root.oidc.oidc_enabled + or root.saml2.saml2_enabled + or root.cas.cas_enabled + or root.jwt.jwt_enabled + ): + raise ConfigError("SSO cannot be enabled when OAuth delegation is enabled") + + if bool(root.authproviders.password_providers): + raise ConfigError( + "Password auth providers cannot be enabled when OAuth delegation is enabled" + ) + + if root.captcha.enable_registration_captcha: + raise ConfigError( + "CAPTCHA cannot be enabled when OAuth delegation is enabled" + ) + + if root.experimental.msc3882_enabled: + raise ConfigError( + "MSC3882 cannot be enabled when OAuth delegation is enabled" + ) + + if root.registration.refresh_token_lifetime: + raise ConfigError( + "refresh_token_lifetime cannot be set when OAuth delegation is enabled" + ) + + if root.registration.nonrefreshable_access_token_lifetime: + raise ConfigError( + "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled" + ) + + if root.registration.session_lifetime: + raise ConfigError( + "session_lifetime cannot be set when OAuth delegation is enabled" + ) + + if not root.experimental.msc3970_enabled: + raise ConfigError( + "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled" + ) + @attr.s(auto_attribs=True, frozen=True, slots=True) class MSC3866Config: @@ -182,8 +363,14 @@ class ExperimentalConfig(Config): "msc3981_recurse_relations", False ) + # MSC3861: Matrix architecture change to delegate authentication via OIDC + self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + # MSC3970: Scope transaction IDs to devices - self.msc3970_enabled = experimental.get("msc3970_enabled", False) + self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled) + + # Check that none of the other config options conflict with MSC3861 when enabled + self.msc3861.check_config_conflicts(self.root) # MSC4009: E.164 Matrix IDs self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a53984be33..4f986d90cb 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -274,7 +274,7 @@ class AuthHandler: # response. 
self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {} - self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled async def validate_user_via_ui_auth( self, @@ -325,7 +325,7 @@ class AuthHandler: LimitExceededError if the ratelimiter's failed request count for this user is too high to proceed """ - if self.oauth_delegation_enabled: + if self.msc3861_oauth_delegation_enabled: raise SynapseError( HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861" ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0e9f366cba..134bd2e620 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -38,6 +38,7 @@ from twisted.web.resource import Resource from synapse.api import errors from synapse.api.errors import SynapseError +from synapse.config import ConfigError from synapse.events import EventBase from synapse.events.presence_router import ( GET_INTERESTED_USERS_CALLBACK, @@ -252,6 +253,7 @@ class ModuleApi: self._device_handler = hs.get_device_handler() self.custom_template_dir = hs.config.server.custom_template_directory self._callbacks = hs.get_module_api_callbacks() + self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled try: app_name = self._hs.config.email.email_app_name @@ -419,6 +421,11 @@ class ModuleApi: Added in Synapse v1.46.0. """ + if self.msc3861_oauth_delegation_enabled: + raise ConfigError( + "Cannot use password auth provider callbacks when OAuth delegation is enabled" + ) + return self._password_auth_provider.register_password_auth_provider_callbacks( check_3pid_auth=check_3pid_auth, on_logged_out=on_logged_out, diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index ccd1f7509c..679ab9f266 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -601,7 +601,7 @@ class ThreepidRestServlet(RestServlet): # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle # `threePidCreds` versus `three_pid_creds`. 
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.hs.config.auth.oauth_delegation_enabled: + if self.hs.config.experimental.msc3861.enabled: raise NotFoundError(errcode=Codes.UNRECOGNIZED) if not self.hs.config.registration.enable_3pid_changes: @@ -894,7 +894,7 @@ class AccountStatusRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: - if not hs.config.auth.oauth_delegation_enabled: + if not hs.config.experimental.msc3861.enabled: EmailPasswordRequestTokenRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) @@ -906,7 +906,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) - if not hs.config.auth.oauth_delegation_enabled: + if not hs.config.experimental.msc3861.enabled: ThreepidAddRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 00e9bff43f..38dff9703f 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -135,7 +135,7 @@ class DeviceRestServlet(RestServlet): self.device_handler = handler self.auth_handler = hs.get_auth_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled - self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled async def on_GET( self, request: SynapseRequest, device_id: str @@ -167,7 +167,7 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: - if self.oauth_delegation_enabled: + if self._msc3861_oauth_delegation_enabled: raise UnrecognizedRequestError(code=404) requester = await self.auth.get_user_by_req(request) @@ -350,7 +350,7 @@ class ClaimDehydratedDeviceServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if ( hs.config.worker.worker_app is None - and not hs.config.auth.oauth_delegation_enabled + and not hs.config.experimental.msc3861.enabled ): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index c3ca83c0c8..70b8be1aa2 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -386,7 +386,7 @@ class SigningKeyUploadServlet(RestServlet): # time. Because there is no UIA in MSC3861, for now we throw an error if the # user tries to reset the device signing key when MSC3861 is enabled, but allow # first-time setup. 
- if self.hs.config.auth.oauth_delegation_enabled: + if self.hs.config.experimental.msc3861.enabled: # There is no way to reset the device signing key with MSC3861 if is_cross_signing_setup: raise SynapseError( diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 4d0eabcb84..d4dc2462b9 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -633,7 +633,7 @@ class CasTicketServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return LoginRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index b64a6d5961..94ad90942f 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -80,7 +80,7 @@ class LogoutAllRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return LogoutRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 6866988c38..f8fb0e1dee 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -955,7 +955,7 @@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return if hs.config.worker.worker_app is None: diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index dcfd0ad6aa..57335fb913 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -47,7 +47,7 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc } # Expose the JWKS endpoint if OAuth2 delegation is enabled - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: from synapse.rest.synapse.client.jwks import JwksResource resources["/_synapse/jwks"] = JwksResource(hs) diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py index 818585843e..7c0a1223fb 100644 --- a/synapse/rest/synapse/client/jwks.py +++ b/synapse/rest/synapse/client/jwks.py @@ -26,8 +26,6 @@ logger = logging.getLogger(__name__) class JwksResource(DirectServeJsonResource): def __init__(self, hs: "HomeServer"): - from authlib.jose.rfc7517 import Key - super().__init__(extract_context=True) # Parameters that are allowed to be exposed in the public key. 
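The hunk below replaces the `client_secret` lookup with the pre-parsed `jwk` from the new MSC3861 config and serves only its public parameters. As a standalone sketch of that filtering — not part of the patch series — the allow-list here is abridged, the JWK values are dummies, and in the real code the `private_jwk` dict comes from authlib's `key.as_dict()`:

    public_parameters = {"kty", "use", "key_ops", "alg", "kid", "n", "e"}  # abridged allow-list

    private_jwk = {"kty": "RSA", "kid": "test", "n": "<modulus>", "e": "AQAB", "d": "<private exponent>"}
    # Keep only the keys on the allow-list; private material ("d" here) is dropped.
    public_jwk = {k: v for k, v in private_jwk.items() if k in public_parameters}

    assert public_jwk == {"kty": "RSA", "kid": "test", "n": "<modulus>", "e": "AQAB"}
    print({"keys": [public_jwk]})  # shape of the /_synapse/jwks response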
@@ -53,10 +51,10 @@ class JwksResource(DirectServeJsonResource): "ext", } - secret = hs.config.auth.oauth_delegation_client_secret + key = hs.config.experimental.msc3861.jwk - if isinstance(secret, Key): - private_key = secret.as_dict() + if key is not None: + private_key = key.as_dict() public_key = { k: v for k, v in private_key.items() if k in public_parameters } diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index fd3b17a5ad..b8b4b5379b 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -44,14 +44,15 @@ class WellKnownBuilder: "base_url": self._config.registration.default_identity_server } - if self._config.auth.oauth_delegation_enabled: + # We use the MSC3861 values as they are used by multiple MSCs + if self._config.experimental.msc3861.enabled: result["org.matrix.msc2965.authentication"] = { - "issuer": self._config.auth.oauth_delegation_issuer + "issuer": self._config.experimental.msc3861.issuer } - if self._config.auth.oauth_delegation_account != "": + if self._config.experimental.msc3861.account_management_url is not None: result["org.matrix.msc2965.authentication"][ "account" - ] = self._config.auth.oauth_delegation_account + ] = self._config.experimental.msc3861.account_management_url if self._config.server.extra_well_known_client_content: for ( diff --git a/synapse/server.py b/synapse/server.py index 1c82500f30..0f36ef69cb 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -428,10 +428,10 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: - if self.config.auth.oauth_delegation_enabled: - from synapse.api.auth.oauth_delegated import OAuthDelegatedAuth + if self.config.experimental.msc3861.enabled: + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth - return OAuthDelegatedAuth(self) + return MSC3861DelegatedAuth(self) return InternalAuth(self) @cache_in_self diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py new file mode 100644 index 0000000000..c5fc6d6ebb --- /dev/null +++ b/tests/config/test_oauth_delegation.py @@ -0,0 +1,202 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict +from unittest.mock import Mock + +from synapse.config import ConfigError +from synapse.module_api import ModuleApi +from synapse.types import JsonDict + +from tests.server import get_clock +from tests.unittest import HomeserverTestCase, override_config, skip_unless + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +# These are a few constants that are used as config parameters in the tests. 
+SERVER_NAME = "test" +ISSUER = "https://issuer/" +CLIENT_ID = "test-client-id" +CLIENT_SECRET = "test-client-secret" +BASE_URL = "https://synapse/" + + +class CustomAuthModule: + """A module which registers a password auth provider.""" + + @staticmethod + def parse_config(config: JsonDict) -> None: + pass + + def __init__(self, config: None, api: ModuleApi): + api.register_password_auth_provider_callbacks( + auth_checkers={("m.login.password", ("password",)): Mock()}, + ) + + +@skip_unless(HAS_AUTHLIB, "requires authlib") +class MSC3861OAuthDelegation(HomeserverTestCase): + """Test that the Homeserver fails to initialize if the config is invalid.""" + + def setUp(self) -> None: + self.reactor, self.clock = get_clock() + self._hs_args = {"clock": self.clock, "reactor": self.reactor} + + def default_config(self) -> Dict[str, Any]: + config = super().default_config() + config["public_baseurl"] = BASE_URL + if "experimental_features" not in config: + config["experimental_features"] = {} + config["experimental_features"]["msc3861"] = { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } + return config + + def test_registration_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "password_config": { + "enabled": True, + }, + } + ) + def test_password_config_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "oidc_providers": [ + { + "idp_id": "microsoft", + "idp_name": "Microsoft", + "issuer": "https://login.microsoftonline.com//v2.0", + "client_id": "", + "client_secret": "", + "scopes": ["openid", "profile"], + "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", + "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", + "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", + } + ], + } + ) + def test_oidc_sso_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "cas_config": { + "enabled": True, + "server_url": "https://cas-server.com", + "displayname_attribute": "name", + "required_attributes": {"userGroup": "staff", "department": "None"}, + }, + } + ) + def test_cas_sso_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "modules": [ + { + "module": f"{__name__}.{CustomAuthModule.__qualname__}", + "config": {}, + } + ], + } + ) + def test_auth_providers_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "jwt_config": { + "enabled": True, + "secret": "my-secret-token", + "algorithm": "HS256", + }, + } + ) + def test_jwt_auth_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3882_enabled": True, + }, + } + ) + def test_msc3882_auth_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "recaptcha_public_key": "test", + 
"recaptcha_private_key": "test", + "enable_registration_captcha": True, + } + ) + def test_captcha_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "refresh_token_lifetime": "24h", + "refreshable_access_token_lifetime": "10m", + "nonrefreshable_access_token_lifetime": "24h", + } + ) + def test_refreshable_tokens_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "session_lifetime": "24h", + } + ) + def test_session_lifetime_cannot_be_set(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index ee1bc5ca7a..081fef51ec 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -109,12 +109,15 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - config["oauth_delegation"] = { - "enabled": True, - "issuer": ISSUER, - "client_id": CLIENT_ID, - "client_auth_method": "client_secret_post", - "client_secret": CLIENT_SECRET, + config["disable_registration"] = True + config["experimental_features"] = { + "msc3861": { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } } return config diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 34333d88df..377243a170 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -108,14 +108,17 @@ class WellKnownTests(unittest.HomeserverTestCase): @unittest.override_config( { "public_baseurl": "https://homeserver", # this is only required so that client well known is served - "oauth_delegation": { - "enabled": True, - "issuer": "https://issuer", - "account": "https://my-account.issuer", - "client_id": "id", - "client_auth_method": "client_secret_post", - "client_secret": "secret", + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer", + "account_management_url": "https://my-account.issuer", + "client_id": "id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + }, }, + "disable_registration": True, } ) def test_client_well_known_msc3861_oauth_delegation(self) -> None: -- cgit 1.5.1 From bad1f2cd3558d908b579b6c191bcd7bebecd32be Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Tue, 7 Feb 2023 12:55:54 +0000 Subject: Tests for JWKS endpoint --- tests/config/test_oauth_delegation.py | 117 ++++++++++++++++++++++++++++++---- tests/rest/admin/test_jwks.py | 106 ++++++++++++++++++++++++++++++ 2 files changed, 212 insertions(+), 11 deletions(-) create mode 100644 tests/rest/admin/test_jwks.py diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index c5fc6d6ebb..6d294e0144 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -51,6 +51,34 @@ class CustomAuthModule: ) +def _dict_merge(merge_dict: dict, into_dict: dict) -> None: + """Do a deep merge of two dicts + + Recursively merges `merge_dict` into `into_dict`: + * For keys where both `merge_dict` and `into_dict` have a dict value, the values + are recursively merged + * For all other keys, the values in `into_dict` (if 
any) are overwritten with + the value from `merge_dict`. + + Args: + merge_dict: dict to merge + into_dict: target dict to be modified + """ + for k, v in merge_dict.items(): + if k not in into_dict: + into_dict[k] = v + continue + + current_val = into_dict[k] + + if isinstance(v, dict) and isinstance(current_val, dict): + _dict_merge(v, current_val) + continue + + # otherwise we just overwrite + into_dict[k] = v + + @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): """Test that the Homeserver fails to initialize if the config is invalid.""" @@ -60,18 +88,85 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self._hs_args = {"clock": self.clock, "reactor": self.reactor} def default_config(self) -> Dict[str, Any]: - config = super().default_config() - config["public_baseurl"] = BASE_URL - if "experimental_features" not in config: - config["experimental_features"] = {} - config["experimental_features"]["msc3861"] = { - "enabled": True, - "issuer": ISSUER, - "client_id": CLIENT_ID, - "client_auth_method": "client_secret_post", - "client_secret": CLIENT_SECRET, + default_extra_config = { + "public_baseurl": BASE_URL, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } + }, + } + _dict_merge( + {} if self._extra_config is None else self._extra_config, + default_extra_config, + ) + self._extra_config = default_extra_config + return super().default_config() + + @override_config( + { + "enable_registration": False, } - return config + ) + def test_client_secret_post_works(self) -> None: + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "invalid", + } + }, + } + ) + def test_invalid_client_auth_method(self) -> None: + with self.assertRaises(ValueError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "private_key_jwt", + } + }, + } + ) + def test_invalid_private_key_jwt(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "private_key_jwt", + "jwk": { + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", + "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": 
"dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + } + }, + } + ) + def test_private_key_jwt_works(self) -> None: + self.setup_test_homeserver() def test_registration_cannot_be_enabled(self) -> None: with self.assertRaises(ConfigError): diff --git a/tests/rest/admin/test_jwks.py b/tests/rest/admin/test_jwks.py new file mode 100644 index 0000000000..a9a6191c73 --- /dev/null +++ b/tests/rest/admin/test_jwks.py @@ -0,0 +1,106 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict + +from twisted.web.resource import Resource + +from synapse.rest.synapse.client import build_synapse_client_resource_tree + +from tests.unittest import HomeserverTestCase, override_config, skip_unless + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +@skip_unless(HAS_AUTHLIB, "requires authlib") +class JWKSTestCase(HomeserverTestCase): + """Test /_synapse/jwks JWKS data.""" + + def create_resource_dict(self) -> Dict[str, Resource]: + d = super().create_resource_dict() + d.update(build_synapse_client_resource_tree(self.hs)) + return d + + def test_empty_jwks(self) -> None: + """Test that the JWKS endpoint is not present by default.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(404, channel.code, channel.result) + + @override_config( + { + "disable_registration": True, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer/", + "client_id": "test-client-id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + }, + }, + } + ) + def test_empty_jwks_for_msc3861_client_secret_post(self) -> None: + """Test that the JWKS endpoint is empty when plain auth is used.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(200, channel.code, channel.result) + self.assertEqual({"keys": []}, channel.json_body) + + @override_config( + { + "disable_registration": True, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer/", + "client_id": "test-client-id", + "client_auth_method": "private_key_jwt", + "jwk": { + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", 
+ "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + }, + }, + } + ) + def test_key_returned_for_msc3861_client_secret_post(self) -> None: + """Test that the JWKS includes public part of JWK for private_key_jwt auth is used.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(200, channel.code, channel.result) + self.assertEqual( + { + "keys": [ + { + "kty": "RSA", + "e": "AQAB", + "kid": "test", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + } + ] + }, + channel.json_body, + ) -- cgit 1.5.1 From c008b44b4f7bb3604be77709c62e6ec78389f8ed Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 4 Apr 2023 18:11:17 +0200 Subject: Add an admin token for MAS -> Synapse calls --- synapse/api/auth/msc3861_delegated.py | 15 +++++++++++++++ synapse/config/experimental.py | 9 +++++++++ 2 files changed, 24 insertions(+) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 4ca3280bd3..a84b7730b3 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -90,6 +90,7 @@ class MSC3861DelegatedAuth(BaseAuth): self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname + self._admin_token = self._config.admin_token self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) @@ -176,6 +177,20 @@ class MSC3861DelegatedAuth(BaseAuth): token: str, allow_expired: bool = False, ) -> Requester: + if self._admin_token is not None and token == self._admin_token: + # XXX: This is a temporary solution so that the admin API can be called by + # the OIDC provider. This will be removed once we have OIDC client + # credentials grant support in matrix-authentication-service. + logging.info("Admin toked used") + # XXX: that user doesn't exist and won't be provisioned. 
+ # This is mostly fine for admin calls, but we should also think about doing
+ # requesters without a user_id.
+ admin_user = UserID("__oidc_admin", self._hostname)
+ return create_requester(
+ user_id=admin_user,
+ scope=["urn:synapse:admin:*"],
+ )
+
 introspection_result = await self._introspect_token(token)
 logger.info(f"Introspection result: {introspection_result!r}")
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index b9607975f9..d4dff22b0b 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -136,6 +136,15 @@ class MSC3861:
 )
 """The URL of the My Account page on the OIDC Provider as per MSC2965."""

+ admin_token: Optional[str] = attr.ib(
+ default=None,
+ validator=attr.validators.optional(attr.validators.instance_of(str)),
+ )
+ """
+ A token that should be considered as an admin token.
+ This is used by the OIDC provider to make admin calls to Synapse.
+ """
+
 def check_config_conflicts(self, root: RootConfig) -> None:
 """Checks for any configuration conflicts with other parts of Synapse.
-- cgit 1.5.1
From 4d0231b3648d5d70a8e0f4d99a0c040f12f15669 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 16 May 2023 10:52:37 +0200
Subject: Make AS tokens work & allow ASes to /register

---
 synapse/api/auth/base.py | 80 +++++++++++++++++++++++++++++++-
 synapse/api/auth/internal.py | 82 +--------------------------------
 synapse/api/auth/msc3861_delegated.py | 9 +++-
 synapse/rest/client/register.py | 69 +++++++++++++++++++++++++++
 tests/handlers/test_oauth_delegation.py | 4 +-
 5 files changed, 159 insertions(+), 85 deletions(-)

diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py
index 240f2b90de..9321d6f186 100644
--- a/synapse/api/auth/base.py
+++ b/synapse/api/auth/base.py
@@ -14,6 +14,8 @@
 import logging
 from typing import TYPE_CHECKING, Optional, Tuple

+from netaddr import IPAddress
+
 from twisted.web.server import Request

 from synapse import event_auth
@@ -26,7 +28,8 @@ from synapse.api.errors import (
 )
 from synapse.appservice import ApplicationService
 from synapse.logging.opentracing import trace
-from synapse.types import Requester
+from synapse.types import Requester, create_requester
+from synapse.util.cancellation import cancellable

 if TYPE_CHECKING:
 from synapse.server import HomeServer
@@ -271,3 +274,78 @@ class BaseAuth:
 raise MissingClientTokenError()

 return query_params[0].decode("ascii")
+
+ @cancellable
+ async def get_appservice_user(
+ self, request: Request, access_token: str
+ ) -> Optional[Requester]:
+ """
+ Given a request, reads the request parameters to determine:
+ - whether it's an application service that's making this request
+ - what user the application service should be treated as controlling
+ (the user_id URI parameter allows an application service to masquerade
+ any applicable user in its namespace)
+ - what device the application service should be treated as controlling
+ (the device_id[^1] URI parameter allows an application service to masquerade
+ as any device that exists for the relevant user)
+
+ [^1] Unstable and provided by MSC3202.
+ Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.
+ + Returns: + the application service `Requester` of that request + + Postconditions: + - The `app_service` field in the returned `Requester` is set + - The `user_id` field in the returned `Requester` is either the application + service sender or the controlled user set by the `user_id` URI parameter + - The returned application service is permitted to control the returned user ID. + - The returned device ID, if present, has been checked to be a valid device ID + for the returned user ID. + """ + DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" + + app_service = self.store.get_app_service_by_token(access_token) + if app_service is None: + return None + + if app_service.ip_range_whitelist: + ip_address = IPAddress(request.getClientAddress().host) + if ip_address not in app_service.ip_range_whitelist: + return None + + # This will always be set by the time Twisted calls us. + assert request.args is not None + + if b"user_id" in request.args: + effective_user_id = request.args[b"user_id"][0].decode("utf8") + await self.validate_appservice_can_control_user_id( + app_service, effective_user_id + ) + else: + effective_user_id = app_service.sender + + effective_device_id: Optional[str] = None + + if ( + self.hs.config.experimental.msc3202_device_masquerading_enabled + and DEVICE_ID_ARG_NAME in request.args + ): + effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") + # We only just set this so it can't be None! + assert effective_device_id is not None + device_opt = await self.store.get_device( + effective_user_id, effective_device_id + ) + if device_opt is None: + # For now, use 400 M_EXCLUSIVE if the device doesn't exist. + # This is an open thread of discussion on MSC3202 as of 2021-12-09. + raise AuthError( + 400, + f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", + Codes.EXCLUSIVE, + ) + + return create_requester( + effective_user_id, app_service=app_service, device_id=effective_device_id + ) diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index 813d537e53..e2ae198b19 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -12,12 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import pymacaroons -from netaddr import IPAddress - -from twisted.web.server import Request from synapse.api.errors import ( AuthError, @@ -122,7 +119,7 @@ class InternalAuth(BaseAuth): access_token = self.get_access_token_from_request(request) # First check if it could be a request from an appservice - requester = await self._get_appservice_user(request) + requester = await self.get_appservice_user(request, access_token) if not requester: # If not, it should be from a regular user requester = await self.get_user_by_access_token( @@ -189,81 +186,6 @@ class InternalAuth(BaseAuth): except KeyError: raise MissingClientTokenError() - @cancellable - async def _get_appservice_user(self, request: Request) -> Optional[Requester]: - """ - Given a request, reads the request parameters to determine: - - whether it's an application service that's making this request - - what user the application service should be treated as controlling - (the user_id URI parameter allows an application service to masquerade - any applicable user in its namespace) - - what device the application service should be treated as controlling - (the device_id[^1] URI parameter allows an application service to masquerade - as any device that exists for the relevant user) - - [^1] Unstable and provided by MSC3202. - Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. - - Returns: - the application service `Requester` of that request - - Postconditions: - - The `app_service` field in the returned `Requester` is set - - The `user_id` field in the returned `Requester` is either the application - service sender or the controlled user set by the `user_id` URI parameter - - The returned application service is permitted to control the returned user ID. - - The returned device ID, if present, has been checked to be a valid device ID - for the returned user ID. - """ - DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" - - app_service = self.store.get_app_service_by_token( - self.get_access_token_from_request(request) - ) - if app_service is None: - return None - - if app_service.ip_range_whitelist: - ip_address = IPAddress(request.getClientAddress().host) - if ip_address not in app_service.ip_range_whitelist: - return None - - # This will always be set by the time Twisted calls us. - assert request.args is not None - - if b"user_id" in request.args: - effective_user_id = request.args[b"user_id"][0].decode("utf8") - await self.validate_appservice_can_control_user_id( - app_service, effective_user_id - ) - else: - effective_user_id = app_service.sender - - effective_device_id: Optional[str] = None - - if ( - self.hs.config.experimental.msc3202_device_masquerading_enabled - and DEVICE_ID_ARG_NAME in request.args - ): - effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") - # We only just set this so it can't be None! - assert effective_device_id is not None - device_opt = await self.store.get_device( - effective_user_id, effective_device_id - ) - if device_opt is None: - # For now, use 400 M_EXCLUSIVE if the device doesn't exist. - # This is an open thread of discussion on MSC3202 as of 2021-12-09. 
- raise AuthError(
- 400,
- f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
- Codes.EXCLUSIVE,
- )
-
- return create_requester(
- effective_user_id, app_service=app_service, device_id=effective_device_id
- )
-
 async def get_user_by_access_token(
 self,
 token: str,
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
index a84b7730b3..b84dce2563 100644
--- a/synapse/api/auth/msc3861_delegated.py
+++ b/synapse/api/auth/msc3861_delegated.py
@@ -162,14 +162,19 @@ class MSC3861DelegatedAuth(BaseAuth):
 ) -> Requester:
 access_token = self.get_access_token_from_request(request)

- # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission:
- requester = await self.get_user_by_access_token(access_token, allow_expired)
+ requester = await self.get_appservice_user(request, access_token)
+ if not requester:
+ # TODO: we probably want to assert the allow_guest inside this call
+ # so that we don't provision the user if they don't have enough permission:
+ requester = await self.get_user_by_access_token(access_token, allow_expired)

 if not allow_guest and requester.is_guest:
 raise OAuthInsufficientScopeError(
 ["urn:matrix:org.matrix.msc2967.client:api:*"]
 )

+ request.requester = requester
+
 return requester

 async def get_user_by_access_token(
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index f8fb0e1dee..d59669f0b6 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -869,6 +869,74 @@ class RegisterRestServlet(RestServlet):
 return 200, result

+class RegisterAppServiceOnlyRestServlet(RestServlet):
+ """An alternative registration API endpoint that only allows ASes to register
+
+ This replaces the regular /register endpoint if MSC3861 is enabled. There are two notable
+ differences with the regular /register endpoint:
+ - It only allows the `m.login.application_service` login type
+ - It does not create a device or access token for the just-registered user
+
+ Note that the exact behaviour of this endpoint is not yet finalised. It should be
+ just good enough to make most ASes work.
+ """
+
+ PATTERNS = client_patterns("/register$")
+ CATEGORY = "Registration/login requests"
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+
+ self.auth = hs.get_auth()
+ self.registration_handler = hs.get_registration_handler()
+ self.ratelimiter = hs.get_registration_ratelimiter()
+
+ @interactive_auth_handler
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ body = parse_json_object_from_request(request)
+
+ client_addr = request.getClientAddress().host
+
+ await self.ratelimiter.ratelimit(None, client_addr, update=False)
+
+ kind = parse_string(request, "kind", default="user")
+
+ if kind == "guest":
+ raise SynapseError(403, "Guest access is disabled")
+ elif kind != "user":
+ raise UnrecognizedRequestError(
+ f"Do not understand membership kind: {kind}",
+ )
+
+ # Pull out the provided username and do basic sanity checks early since
+ # the auth layer will store these in sessions.
+ desired_username = body.get("username")
+ if not isinstance(desired_username, str) or len(desired_username) > 512:
+ raise SynapseError(400, "Invalid username")
+
+ # Allow only ASes to use this API.
+ if body.get("type") != APP_SERVICE_REGISTRATION_TYPE: + raise SynapseError(403, "Non-application service registration type") + + if not self.auth.has_access_token(request): + raise SynapseError( + 400, + "Appservice token must be provided when using a type of m.login.application_service", + ) + + # XXX we should check that desired_username is valid. Currently + # we give appservices carte blanche for any insanity in mxids, + # because the IRC bridges rely on being able to register stupid + # IDs. + + as_token = self.auth.get_access_token_from_request(request) + + user_id = await self.registration_handler.appservice_register( + desired_username, as_token + ) + return 200, {"user_id": user_id} + + def _calculate_registration_flows( config: HomeServerConfig, auth_handler: AuthHandler ) -> List[List[str]]: @@ -956,6 +1024,7 @@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.experimental.msc3861.enabled: + RegisterAppServiceOnlyRestServlet(hs).register(http_server) return if hs.config.worker.worker_app is None: diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 081fef51ec..e53020a58a 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -527,8 +527,8 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.expect_unrecognized( "GET", "/_matrix/client/v1/register/m.login.registration_token/validity" ) - self.expect_unrecognized("POST", "/_matrix/client/v3/register") - self.expect_unrecognized("GET", "/_matrix/client/v3/register") + # This is still available for AS registrations + # self.expect_unrecognized("POST", "/_matrix/client/v3/register") self.expect_unrecognized("GET", "/_matrix/client/v3/register/available") self.expect_unrecognized( "POST", "/_matrix/client/v3/register/email/requestToken" -- cgit 1.5.1 From e343125b3880bfc55223735a784eb1894db5e9be Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 10 May 2023 18:05:06 +0200 Subject: Disable incompatible Admin API endpoints --- synapse/rest/admin/__init__.py | 21 +++++++++++++-------- synapse/rest/admin/users.py | 8 ++++++++ tests/handlers/test_oauth_delegation.py | 19 +++++++++++++++++++ 3 files changed, 40 insertions(+), 8 deletions(-) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index c729364839..fe8177ed4d 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -257,9 +257,11 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: DeleteRoomStatusByRoomIdRestServlet(hs).register(http_server) JoinRoomAliasServlet(hs).register(http_server) VersionServlet(hs).register(http_server) - UserAdminServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserAdminServlet(hs).register(http_server) UserMembershipRestServlet(hs).register(http_server) - UserTokenRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserTokenRestServlet(hs).register(http_server) UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) UserMediaStatisticsRestServlet(hs).register(http_server) @@ -274,9 +276,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RoomEventContextServlet(hs).register(http_server) RateLimitRestServlet(hs).register(http_server) UsernameAvailableRestServlet(hs).register(http_server) - ListRegistrationTokensRestServlet(hs).register(http_server) - 
NewRegistrationTokenRestServlet(hs).register(http_server) - RegistrationTokenRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + ListRegistrationTokensRestServlet(hs).register(http_server) + NewRegistrationTokenRestServlet(hs).register(http_server) + RegistrationTokenRestServlet(hs).register(http_server) DestinationMembershipRestServlet(hs).register(http_server) DestinationResetConnectionRestServlet(hs).register(http_server) DestinationRestServlet(hs).register(http_server) @@ -306,10 +309,12 @@ def register_servlets_for_client_rest_resource( # The following resources can only be run on the main process. if hs.config.worker.worker_app is None: DeactivateAccountRestServlet(hs).register(http_server) - ResetPasswordRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) - UserRegisterServlet(hs).register(http_server) - AccountValidityRenewServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserRegisterServlet(hs).register(http_server) + AccountValidityRenewServlet(hs).register(http_server) # Load the media repo ones if we're using them. Otherwise load the servlets which # don't need a media repo (typically readonly admin APIs). diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 932333ae57..407fe9c804 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -71,6 +71,7 @@ class UsersRestServletV2(RestServlet): self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() self._msc3866_enabled = hs.config.experimental.msc3866.enabled + self._msc3861_enabled = hs.config.experimental.msc3861.enabled async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -94,7 +95,14 @@ class UsersRestServletV2(RestServlet): user_id = parse_string(request, "user_id") name = parse_string(request, "name") + guests = parse_boolean(request, "guests", default=True) + if self._msc3861_enabled and guests: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The guests parameter is not supported when MSC3861 is enabled.", + errcode=Codes.INVALID_PARAM, + ) deactivated = parse_boolean(request, "deactivated", default=False) # If support for MSC3866 is not enabled, apply no filtering based on the diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index e53020a58a..b79c43a424 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( InvalidClientTokenError, OAuthInsufficientScopeError, ) +from synapse.rest import admin from synapse.rest.client import account, devices, keys, login, logout, register from synapse.server import HomeServer from synapse.types import JsonDict @@ -104,6 +105,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): register.register_servlets, login.register_servlets, logout.register_servlets, + admin.register_servlets, ] def default_config(self) -> Dict[str, Any]: @@ -557,3 +559,20 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.expect_unrecognized( "POST", "/_matrix/client/v3/user/{USERNAME}/openid/request_token" ) + + def test_admin_api_endpoints_removed(self) -> None: + """Test that admin API endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens") + 
self.expect_unrecognized("POST", "/_synapse/admin/v1/registration_tokens/new") + self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens/abcd") + self.expect_unrecognized("PUT", "/_synapse/admin/v1/registration_tokens/abcd") + self.expect_unrecognized( + "DELETE", "/_synapse/admin/v1/registration_tokens/abcd" + ) + self.expect_unrecognized("POST", "/_synapse/admin/v1/reset_password/foo") + self.expect_unrecognized("POST", "/_synapse/admin/v1/users/foo/login") + self.expect_unrecognized("GET", "/_synapse/admin/v1/register") + self.expect_unrecognized("POST", "/_synapse/admin/v1/register") + self.expect_unrecognized("GET", "/_synapse/admin/v1/users/foo/admin") + self.expect_unrecognized("PUT", "/_synapse/admin/v1/users/foo/admin") + self.expect_unrecognized("POST", "/_synapse/admin/v1/account_validity/validity") -- cgit 1.5.1 From ec9379d7e298c24f3530cf48ee34c30aa038feb2 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 12 May 2023 15:22:46 +0200 Subject: Newsfile. --- changelog.d/15582.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/15582.feature diff --git a/changelog.d/15582.feature b/changelog.d/15582.feature new file mode 100644 index 0000000000..00959500a5 --- /dev/null +++ b/changelog.d/15582.feature @@ -0,0 +1 @@ +Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. -- cgit 1.5.1 From 14a5be9c4d69b5669792f2cdc658c266847a8c4a Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 22 May 2023 15:48:57 +0200 Subject: Handle errors when introspecting tokens This returns a proper 503 when the introspection endpoint is not working for some reason, which should avoid logging out clients in those cases. --- synapse/api/auth/msc3861_delegated.py | 42 +++++++++++++++++++++++++++++---- tests/handlers/test_oauth_delegation.py | 35 +++++++++++++++++++++++++++ tests/test_utils/__init__.py | 4 ++-- 3 files changed, 74 insertions(+), 7 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index b84dce2563..82c66691da 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -27,9 +27,11 @@ from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth from synapse.api.errors import ( AuthError, + HttpResponseException, InvalidClientTokenError, OAuthInsufficientScopeError, StoreError, + SynapseError, ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable @@ -117,6 +119,21 @@ class MSC3861DelegatedAuth(BaseAuth): return metadata async def _introspect_token(self, token: str) -> IntrospectionToken: + """ + Send a token to the introspection endpoint and returns the introspection response + + Parameters: + token: The token to introspect + + Raises: + HttpResponseException: If the introspection endpoint returns a non-2xx response + ValueError: If the introspection endpoint returns an invalid JSON response + JSONDecodeError: If the introspection endpoint returns a non-JSON response + Exception: If the HTTP request fails + + Returns: + The introspection response + """ metadata = await self._issuer_metadata.get() introspection_endpoint = metadata.get("introspection_endpoint") raw_headers: Dict[str, str] = { @@ -136,7 +153,7 @@ class MSC3861DelegatedAuth(BaseAuth): # Do the actual request # We're not using the SimpleHttpClient util methods as we don't want to - # check the HTTP status code and we do the body encoding 
ourself.
+ # check the HTTP status code, and we do the body encoding ourselves.
 response = await self._http_client.request(
 method="POST",
 uri=uri,
@@ -145,10 +162,21 @@
 )

 resp_body = await make_deferred_yieldable(readBody(response))
- # TODO: Let's not worry about 5xx errors & co. for now and just try
- # decoding that as JSON. We should also do some validation of the
- # response
+
+ if response.code < 200 or response.code >= 300:
+ raise HttpResponseException(
+ response.code,
+ response.phrase.decode("ascii", errors="replace"),
+ resp_body,
+ )
+
 resp = json_decoder.decode(resp_body.decode("utf-8"))
+
+ if not isinstance(resp, dict):
+ raise ValueError(
+ "The introspection endpoint returned an invalid JSON response."
+ )
+
 return IntrospectionToken(**resp)

 async def is_server_admin(self, requester: Requester) -> bool:
@@ -196,7 +224,11 @@
 scope=["urn:synapse:admin:*"],
 )

- introspection_result = await self._introspect_token(token)
+ try:
+ introspection_result = await self._introspect_token(token)
+ except Exception:
+ logger.exception("Failed to introspect token")
+ raise SynapseError(503, "Unable to introspect the access token")

 logger.info(f"Introspection result: {introspection_result!r}")

diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index b79c43a424..16ce2c069d 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -30,6 +30,7 @@ from synapse.api.errors import (
 Codes,
 InvalidClientTokenError,
 OAuthInsufficientScopeError,
+ SynapseError,
 )
 from synapse.rest import admin
 from synapse.rest.client import account, devices, keys, login, logout, register
@@ -405,6 +406,40 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
 )
 self.assertEqual(requester.device_id, DEVICE)

+ def test_unavailable_introspection_endpoint(self) -> None:
+ """The handler should return a 503 when the introspection endpoint is unavailable."""
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+
+ # The introspection endpoint is returning an error.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse(code=500, body=b"Internal Server Error")
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint request fails.
+ self.http_client.request = simple_async_mock(raises=Exception())
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint does not return a JSON object.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200, payload=["this is an array", "not an object"]
+ )
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint does not return valid JSON.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse(code=200, body=b"this is not valid JSON")
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
 def make_device_keys(self, user_id: str, device_id: str) -> JsonDict:
 # We only generate a master key to simplify the test.
master_signing_key = generate_signing_key(device_id) diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index e5dae670a7..c8cc841d95 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -33,7 +33,7 @@ from twisted.web.http import RESPONSES from twisted.web.http_headers import Headers from twisted.web.iweb import IResponse -from synapse.types import JsonDict +from synapse.types import JsonSerializable if TYPE_CHECKING: from sys import UnraisableHookArgs @@ -145,7 +145,7 @@ class FakeResponse: # type: ignore[misc] protocol.connectionLost(Failure(ResponseDone())) @classmethod - def json(cls, *, code: int = 200, payload: JsonDict) -> "FakeResponse": + def json(cls, *, code: int = 200, payload: JsonSerializable) -> "FakeResponse": headers = Headers({"Content-Type": ["application/json"]}) body = json.dumps(payload).encode("utf-8") return cls(code=code, body=body, headers=headers) -- cgit 1.5.1 From 98afc57d59df118a13f894fc66f206bc7409e14a Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 22 May 2023 17:17:49 +0200 Subject: Make OIDC scope constants --- synapse/api/auth/msc3861_delegated.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 82c66691da..5b0e678c0f 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -44,6 +44,15 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# Scope as defined by MSC2967 +# https://github.com/matrix-org/matrix-spec-proposals/pull/2967 +SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*" +SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest" +SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:" + +# Scope which allows access to the Synapse admin API +SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*" + def scope_to_list(scope: str) -> List[str]: """Convert a scope string to a list of scope tokens""" @@ -197,9 +206,7 @@ class MSC3861DelegatedAuth(BaseAuth): requester = await self.get_user_by_access_token(access_token, allow_expired) if not allow_guest and requester.is_guest: - raise OAuthInsufficientScopeError( - ["urn:matrix:org.matrix.msc2967.client:api:*"] - ) + raise OAuthInsufficientScopeError([SCOPE_MATRIX_API]) request.requester = requester @@ -241,9 +248,9 @@ class MSC3861DelegatedAuth(BaseAuth): scope: List[str] = scope_to_list(introspection_result.get("scope", "")) # Determine type of user based on presence of particular scopes - has_admin_scope = "urn:synapse:admin:*" in scope - has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope - has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope + has_admin_scope = SCOPE_SYNAPSE_ADMIN in scope + has_user_scope = SCOPE_MATRIX_API in scope + has_guest_scope = SCOPE_MATRIX_GUEST in scope is_user = has_user_scope or has_admin_scope is_guest = has_guest_scope and not is_user @@ -299,10 +306,8 @@ class MSC3861DelegatedAuth(BaseAuth): # Find device_id in scope device_id = None for tok in scope: - if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): - parts = tok.split(":") - if len(parts) == 5: - device_id = parts[4] + if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX): + device_id = tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] if device_id: # Create the device on the fly if it does not exist -- cgit 1.5.1 From f739bde962daa9bc425c8343f35993ae889dbc67 Mon Sep 17 00:00:00 2001 From: Quentin Gliech 
Date: Tue, 23 May 2023 16:59:53 +0200 Subject: Reject tokens with multiple device scopes --- synapse/api/auth/msc3861_delegated.py | 30 ++++++++++++++++++++++++------ tests/handlers/test_oauth_delegation.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 5b0e678c0f..e4b16c0b5c 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -303,13 +303,31 @@ class MSC3861DelegatedAuth(BaseAuth): else: user_id = UserID.from_string(user_id_str) - # Find device_id in scope - device_id = None - for tok in scope: - if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX): - device_id = tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] + # Find device_ids in scope + # We only allow a single device_id in the scope, so we find them all in the + # scope list, and raise if there are more than one. The OIDC server should be + # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. + device_ids = [ + tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] + for tok in scope + if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX) + ] + + if len(device_ids) > 1: + raise AuthError( + 500, + "Multiple device IDs in scope", + ) + + device_id = device_ids[0] if device_ids else None + if device_id is not None: + # Sanity check the device_id + if len(device_id) > 255 or len(device_id) < 1: + raise AuthError( + 500, + "Invalid device ID in scope", + ) - if device_id: # Create the device on the fly if it does not exist try: await self.store.get_device( diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 16ce2c069d..0641535512 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -27,6 +27,7 @@ from signedjson.sign import sign_json from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import ( + AuthError, Codes, InvalidClientTokenError, OAuthInsufficientScopeError, @@ -68,8 +69,9 @@ INTROSPECTION_ENDPOINT = ISSUER + "introspect" SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*" MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*" MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest" +MATRIX_DEVICE_SCOPE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:" DEVICE = "AABBCCDD" -MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE +MATRIX_DEVICE_SCOPE = MATRIX_DEVICE_SCOPE_PREFIX + DEVICE SUBJECT = "abc-def-ghi" USERNAME = "test-user" USER_ID = "@" + USERNAME + ":" + SERVER_NAME @@ -344,6 +346,31 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) + def test_multiple_devices(self) -> None: + """The handler should raise an error if multiple devices are found in the scope.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join( + [ + MATRIX_USER_SCOPE, + f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC", + f"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF", + ] + ), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), AuthError) + def test_active_guest_not_allowed(self) -> None: """The handler should return an insufficient scope error.""" -- cgit 1.5.1 From 
32a2f050042531ad4673b42789e833e9cd307740 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 26 May 2023 14:50:19 +0200 Subject: Make the config tests spawn the homeserver only when needed --- synapse/config/experimental.py | 40 ++-- tests/config/test_oauth_delegation.py | 348 +++++++++++++++------------------- 2 files changed, 182 insertions(+), 206 deletions(-) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d4dff22b0b..1d189b2e26 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -69,7 +69,8 @@ class MSC3861: if value and not HAS_AUTHLIB: raise ConfigError( "MSC3861 is enabled but authlib is not installed. " - "Please install authlib to use MSC3861." + "Please install authlib to use MSC3861.", + ("experimental", "msc3861", "enabled"), ) issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str)) @@ -114,7 +115,8 @@ class MSC3861: if value == ClientAuthMethod.PRIVATE_KEY_JWT and self.jwk is None: raise ConfigError( - "A JWKS must be provided when using the private_key_jwt client auth method" + "A JWKS must be provided when using the private_key_jwt client auth method", + ("experimental", "msc3861", "client_auth_method"), ) if ( @@ -127,7 +129,8 @@ class MSC3861: and self.client_secret is None ): raise ConfigError( - f"A client secret must be provided when using the {value} client auth method" + f"A client secret must be provided when using the {value} client auth method", + ("experimental", "msc3861", "client_auth_method"), ) account_management_url: Optional[str] = attr.ib( @@ -160,12 +163,14 @@ class MSC3861: or root.auth.password_enabled_for_login ): raise ConfigError( - "Password auth cannot be enabled when OAuth delegation is enabled" + "Password auth cannot be enabled when OAuth delegation is enabled", + ("password_config", "enabled"), ) if root.registration.enable_registration: raise ConfigError( - "Registration cannot be enabled when OAuth delegation is enabled" + "Registration cannot be enabled when OAuth delegation is enabled", + ("enable_registration",), ) if ( @@ -183,32 +188,38 @@ class MSC3861: if root.captcha.enable_registration_captcha: raise ConfigError( - "CAPTCHA cannot be enabled when OAuth delegation is enabled" + "CAPTCHA cannot be enabled when OAuth delegation is enabled", + ("captcha", "enable_registration_captcha"), ) if root.experimental.msc3882_enabled: raise ConfigError( - "MSC3882 cannot be enabled when OAuth delegation is enabled" + "MSC3882 cannot be enabled when OAuth delegation is enabled", + ("experimental_features", "msc3882_enabled"), ) if root.registration.refresh_token_lifetime: raise ConfigError( - "refresh_token_lifetime cannot be set when OAuth delegation is enabled" + "refresh_token_lifetime cannot be set when OAuth delegation is enabled", + ("refresh_token_lifetime",), ) if root.registration.nonrefreshable_access_token_lifetime: raise ConfigError( - "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled" + "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled", + ("nonrefreshable_access_token_lifetime",), ) if root.registration.session_lifetime: raise ConfigError( - "session_lifetime cannot be set when OAuth delegation is enabled" + "session_lifetime cannot be set when OAuth delegation is enabled", + ("session_lifetime",), ) if not root.experimental.msc3970_enabled: raise ConfigError( - "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled" + 
"experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled", + ("experimental_features", "msc3970_enabled"), ) @@ -373,7 +384,12 @@ class ExperimentalConfig(Config): ) # MSC3861: Matrix architecture change to delegate authentication via OIDC - self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + try: + self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + except ValueError as exc: + raise ConfigError( + "Invalid MSC3861 configuration", ("experimental", "msc3861") + ) from exc # MSC3970: Scope transaction IDs to devices self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled) diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 6d294e0144..2ead721b00 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict from unittest.mock import Mock from synapse.config import ConfigError +from synapse.config.homeserver import HomeServerConfig from synapse.module_api import ModuleApi from synapse.types import JsonDict -from tests.server import get_clock -from tests.unittest import HomeserverTestCase, override_config, skip_unless +from tests.server import get_clock, setup_test_homeserver +from tests.unittest import TestCase, skip_unless +from tests.utils import default_config try: import authlib # noqa: F401 @@ -51,45 +52,15 @@ class CustomAuthModule: ) -def _dict_merge(merge_dict: dict, into_dict: dict) -> None: - """Do a deep merge of two dicts - - Recursively merges `merge_dict` into `into_dict`: - * For keys where both `merge_dict` and `into_dict` have a dict value, the values - are recursively merged - * For all other keys, the values in `into_dict` (if any) are overwritten with - the value from `merge_dict`. 
- - Args: - merge_dict: dict to merge - into_dict: target dict to be modified - """ - for k, v in merge_dict.items(): - if k not in into_dict: - into_dict[k] = v - continue - - current_val = into_dict[k] - - if isinstance(v, dict) and isinstance(current_val, dict): - _dict_merge(v, current_val) - continue - - # otherwise we just overwrite - into_dict[k] = v - - @skip_unless(HAS_AUTHLIB, "requires authlib") -class MSC3861OAuthDelegation(HomeserverTestCase): +class MSC3861OAuthDelegation(TestCase): """Test that the Homeserver fails to initialize if the config is invalid.""" def setUp(self) -> None: - self.reactor, self.clock = get_clock() - self._hs_args = {"clock": self.clock, "reactor": self.reactor} - - def default_config(self) -> Dict[str, Any]: - default_extra_config = { + self.config_dict: JsonDict = { + **default_config("test"), "public_baseurl": BASE_URL, + "enable_registration": False, "experimental_features": { "msc3861": { "enabled": True, @@ -100,198 +71,187 @@ class MSC3861OAuthDelegation(HomeserverTestCase): } }, } - _dict_merge( - {} if self._extra_config is None else self._extra_config, - default_extra_config, - ) - self._extra_config = default_extra_config - return super().default_config() - @override_config( - { - "enable_registration": False, - } - ) + def parse_config(self) -> HomeServerConfig: + config = HomeServerConfig() + config.parse_config_dict(self.config_dict, "", "") + return config + def test_client_secret_post_works(self) -> None: - self.setup_test_homeserver() + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_post", + client_secret=CLIENT_SECRET, + ) + + self.parse_config() + + def test_client_secret_post_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_post", + client_secret=None, + ) + + with self.assertRaises(ConfigError): + self.parse_config() + + def test_client_secret_basic_works(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_basic", + client_secret=CLIENT_SECRET, + ) + + self.parse_config() + + def test_client_secret_basic_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_basic", + client_secret=None, + ) + + with self.assertRaises(ConfigError): + self.parse_config() + + def test_client_secret_jwt_works(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_jwt", + client_secret=CLIENT_SECRET, + ) + + self.parse_config() + + def test_client_secret_jwt_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_jwt", + client_secret=None, + ) + + with self.assertRaises(ConfigError): + self.parse_config() - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "invalid", - } - }, - } - ) def test_invalid_client_auth_method(self) -> None: - with self.assertRaises(ValueError): - self.setup_test_homeserver() + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="invalid", + ) - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "private_key_jwt", - } - }, - } - ) - def test_invalid_private_key_jwt(self) -> None: with self.assertRaises(ConfigError): - 
self.setup_test_homeserver() + self.parse_config() + + def test_private_key_jwt_requires_jwk(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="private_key_jwt", + ) + + with self.assertRaises(ConfigError): + self.parse_config() - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "private_key_jwt", - "jwk": { - "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", - "kty": "RSA", - "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", - "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", - "e": "AQAB", - "kid": "test", - "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", - "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", - "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", - "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", - }, - } - }, - } - ) def test_private_key_jwt_works(self) -> None: - self.setup_test_homeserver() + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="private_key_jwt", + jwk={ + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", + "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": 
"S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + ) + self.parse_config() def test_registration_cannot_be_enabled(self) -> None: + self.config_dict["enable_registration"] = True with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "password_config": { - "enabled": True, - }, - } - ) def test_password_config_cannot_be_enabled(self) -> None: + self.config_dict["password_config"] = {"enabled": True} with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "oidc_providers": [ - { - "idp_id": "microsoft", - "idp_name": "Microsoft", - "issuer": "https://login.microsoftonline.com//v2.0", - "client_id": "", - "client_secret": "", - "scopes": ["openid", "profile"], - "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", - "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", - "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", - } - ], - } - ) def test_oidc_sso_cannot_be_enabled(self) -> None: + self.config_dict["oidc_providers"] = [ + { + "idp_id": "microsoft", + "idp_name": "Microsoft", + "issuer": "https://login.microsoftonline.com//v2.0", + "client_id": "", + "client_secret": "", + "scopes": ["openid", "profile"], + "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", + "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", + "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", + } + ] + with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "cas_config": { - "enabled": True, - "server_url": "https://cas-server.com", - "displayname_attribute": "name", - "required_attributes": {"userGroup": "staff", "department": "None"}, - }, - } - ) def test_cas_sso_cannot_be_enabled(self) -> None: + self.config_dict["cas_config"] = { + "enabled": True, + "server_url": "https://cas-server.com", + "displayname_attribute": "name", + "required_attributes": {"userGroup": "staff", "department": "None"}, + } + with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "modules": [ - { - "module": f"{__name__}.{CustomAuthModule.__qualname__}", - "config": {}, - } - ], - } - ) def test_auth_providers_cannot_be_enabled(self) -> None: + self.config_dict["modules"] = [ + { + "module": f"{__name__}.{CustomAuthModule.__qualname__}", + "config": {}, + } + ] + + # This requires actually setting up an HS, as the module will be run on setup, + # which should raise as the module tries to register an auth provider + config = self.parse_config() + reactor, clock = get_clock() with self.assertRaises(ConfigError): - self.setup_test_homeserver() + setup_test_homeserver( + self.addCleanup, reactor=reactor, clock=clock, config=config + ) - @override_config( - { - 
"enable_registration": False, - "jwt_config": { - "enabled": True, - "secret": "my-secret-token", - "algorithm": "HS256", - }, - } - ) def test_jwt_auth_cannot_be_enabled(self) -> None: + self.config_dict["jwt_config"] = { + "enabled": True, + "secret": "my-secret-token", + "algorithm": "HS256", + } + with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3882_enabled": True, - }, - } - ) def test_msc3882_auth_cannot_be_enabled(self) -> None: + self.config_dict["experimental_features"]["msc3882_enabled"] = True with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "recaptcha_public_key": "test", - "recaptcha_private_key": "test", - "enable_registration_captcha": True, - } - ) def test_captcha_cannot_be_enabled(self) -> None: + self.config_dict.update( + enable_registration_captcha=True, + recaptcha_public_key="test", + recaptcha_private_key="test", + ) with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "refresh_token_lifetime": "24h", - "refreshable_access_token_lifetime": "10m", - "nonrefreshable_access_token_lifetime": "24h", - } - ) def test_refreshable_tokens_cannot_be_enabled(self) -> None: + self.config_dict.update( + refresh_token_lifetime="24h", + refreshable_access_token_lifetime="10m", + nonrefreshable_access_token_lifetime="24h", + ) with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "session_lifetime": "24h", - } - ) def test_session_lifetime_cannot_be_set(self) -> None: + self.config_dict["session_lifetime"] = "24h" with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() -- cgit 1.5.1 From ceb3dd77db0d3ce992d40175c3f53f6b6ddfa168 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 26 May 2023 15:16:34 +0200 Subject: Enforce that an admin token also has the basic Matrix API scope --- synapse/api/auth/msc3861_delegated.py | 7 ++----- tests/handlers/test_oauth_delegation.py | 26 +++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index e4b16c0b5c..31c1de0119 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -248,13 +248,10 @@ class MSC3861DelegatedAuth(BaseAuth): scope: List[str] = scope_to_list(introspection_result.get("scope", "")) # Determine type of user based on presence of particular scopes - has_admin_scope = SCOPE_SYNAPSE_ADMIN in scope has_user_scope = SCOPE_MATRIX_API in scope has_guest_scope = SCOPE_MATRIX_GUEST in scope - is_user = has_user_scope or has_admin_scope - is_guest = has_guest_scope and not is_user - if not is_user and not is_guest: + if not has_user_scope and not has_guest_scope: raise InvalidClientTokenError("No scope in token granting user rights") # Match via the sub claim @@ -351,5 +348,5 @@ class MSC3861DelegatedAuth(BaseAuth): user_id=user_id, device_id=device_id, scope=scope, - is_guest=is_guest, + is_guest=(has_guest_scope and not has_user_scope), ) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 0641535512..6309d7b36e 100644 --- a/tests/handlers/test_oauth_delegation.py +++ 
b/tests/handlers/test_oauth_delegation.py @@ -224,6 +224,30 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self._assertParams() + def test_active_admin_not_user(self) -> None: + """The handler should raise when the scope has admin right but not user.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([SYNAPSE_ADMIN_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + def test_active_admin(self) -> None: """The handler should return a requester with admin rights.""" @@ -233,7 +257,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): payload={ "active": True, "sub": SUBJECT, - "scope": " ".join([SYNAPSE_ADMIN_SCOPE]), + "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]), "username": USERNAME, }, ) -- cgit 1.5.1 From c01343de43b86eb4a6c055547369d07c198a435f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 31 May 2023 07:18:29 -0400 Subject: Add stricter mypy options (#15694) Enable warn_unused_configs, strict_concatenate, disallow_subclassing_any, and disallow_incomplete_defs. --- changelog.d/15694.misc | 1 + mypy.ini | 23 ++++++++++++++++++++--- synapse/api/auth/msc3861_delegated.py | 2 +- synapse/federation/federation_server.py | 4 ++-- synapse/handlers/oidc.py | 2 +- synapse/handlers/pagination.py | 4 ++-- synapse/http/server.py | 14 +++++++------- synapse/util/__init__.py | 4 ++-- synapse/util/async_helpers.py | 2 +- synapse/util/caches/lrucache.py | 6 ++---- tests/server.py | 2 +- 11 files changed, 40 insertions(+), 24 deletions(-) create mode 100644 changelog.d/15694.misc diff --git a/changelog.d/15694.misc b/changelog.d/15694.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15694.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index a7ec66196d..56cd1d560e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,17 +2,29 @@ namespace_packages = True plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py follow_imports = normal -check_untyped_defs = True show_error_codes = True show_traceback = True mypy_path = stubs warn_unreachable = True -warn_unused_ignores = True local_partial_types = True no_implicit_optional = True + +# Strict checks, see mypy --help +warn_unused_configs = True +# disallow_any_generics = True +disallow_subclassing_any = True +# disallow_untyped_calls = True disallow_untyped_defs = True -strict_equality = True +disallow_incomplete_defs = True +# check_untyped_defs = True +# disallow_untyped_decorators = True warn_redundant_casts = True +warn_unused_ignores = True +# warn_return_any = True +# no_implicit_reexport = True +strict_equality = True +strict_concatenate = True + # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). 
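# --- Aside: a tiny illustration (not Synapse code) of what the newly enabled
# strictness flags above buy. Under disallow_incomplete_defs, a function that
# annotates only part of its signature becomes an error instead of being
# silently half-checked:
def scale(value: int, factor) -> int:  # mypy: "factor" is missing an annotation
    return value * factor


# disallow_subclassing_any similarly rejects classes whose base resolves to
# Any (typically an import from an untyped third-party library), which is why
# the authlib subclasses later in this commit gain "type: ignore[misc]".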
python_version = 3.8 @@ -31,6 +43,7 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False +disallow_incomplete_defs = False ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. @@ -40,6 +53,7 @@ disallow_untyped_defs = False ;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s ;; `[tool.poetry.dev-dependencies]` list. +# https://github.com/lepture/authlib/issues/460 [mypy-authlib.*] ignore_missing_imports = True @@ -49,9 +63,11 @@ ignore_missing_imports = True [mypy-lxml] ignore_missing_imports = True +# https://github.com/msgpack/msgpack-python/issues/448 [mypy-msgpack] ignore_missing_imports = True +# https://github.com/wolever/parameterized/issues/143 [mypy-parameterized.*] ignore_missing_imports = True @@ -73,6 +89,7 @@ ignore_missing_imports = True [mypy-srvlookup.*] ignore_missing_imports = True +# https://github.com/twisted/treq/pull/366 [mypy-treq.*] ignore_missing_imports = True diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 31c1de0119..bd4fc9c0ee 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -59,7 +59,7 @@ def scope_to_list(scope: str) -> List[str]: return scope.strip().split(" ") -class PrivateKeyJWTWithKid(PrivateKeyJWT): +class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc] """An implementation of the private_key_jwt client auth method that includes a kid header. This is needed because some providers (Keycloak) require the kid header to figure diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e17cb840de..149351dda0 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -515,7 +515,7 @@ class FederationServer(FederationBase): logger.error( "Failed to handle PDU %s", event_id, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) return {"error": str(e)} @@ -1247,7 +1247,7 @@ class FederationServer(FederationBase): logger.error( "Failed to handle PDU %s", event.event_id, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) received_ts = await self.store.remove_received_event_from_staging( diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index e7e0b5e049..24b68e0301 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -1354,7 +1354,7 @@ class OidcProvider: finish_request(request) -class LogoutToken(JWTClaims): +class LogoutToken(JWTClaims): # type: ignore[misc] """ Holds and verify claims of a logout token, as per https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 63b35c8d62..d5257acb7d 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -360,7 +360,7 @@ class PaginationHandler: except Exception: f = Failure() logger.error( - "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore + "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) ) self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED self._purges_by_id[purge_id].error = f.getErrorMessage() @@ -689,7 +689,7 @@ class PaginationHandler: f = Failure() logger.error( "failed", - exc_info=(f.type, f.value, f.getTracebackObject()), # type: 
ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED self._delete_by_id[delete_id].error = f.getErrorMessage() diff --git a/synapse/http/server.py b/synapse/http/server.py index 04768c6a23..933172c873 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -108,7 +108,7 @@ def return_json_error( if f.check(SynapseError): # mypy doesn't understand that f.check asserts the type. - exc: SynapseError = f.value # type: ignore + exc: SynapseError = f.value error_code = exc.code error_dict = exc.error_dict(config) if exc.headers is not None: @@ -124,7 +124,7 @@ def return_json_error( "Got cancellation before client disconnection from %r: %r", request.request_metrics.name, request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) else: error_code = 500 @@ -134,7 +134,7 @@ def return_json_error( "Failed handle request via %r: %r", request.request_metrics.name, request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) # Only respond with an error response if we haven't already started writing, @@ -172,7 +172,7 @@ def return_html_error( """ if f.check(CodeMessageException): # mypy doesn't understand that f.check asserts the type. - cme: CodeMessageException = f.value # type: ignore + cme: CodeMessageException = f.value code = cme.code msg = cme.msg if cme.headers is not None: @@ -189,7 +189,7 @@ def return_html_error( logger.error( "Failed handle request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) elif f.check(CancelledError): code = HTTP_STATUS_REQUEST_CANCELLED @@ -199,7 +199,7 @@ def return_html_error( logger.error( "Got cancellation before client disconnection when handling request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) else: code = HTTPStatus.INTERNAL_SERVER_ERROR @@ -208,7 +208,7 @@ def return_html_error( logger.error( "Failed handle request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) if isinstance(error_template, str): diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9ddd26ccaa..7ea0c4c36b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -76,7 +76,7 @@ def unwrapFirstError(failure: Failure) -> Failure: # the subFailure's value, which will do a better job of preserving stacktraces. 
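# --- Aside: the logging pattern several of these hunks touch, in isolation.
# A Twisted Failure carries its own (type, value, traceback) triple, which
# stdlib logging accepts via exc_info; the "# type: ignore" comments could be
# dropped, presumably because Twisted's annotations no longer clash with the
# stricter settings. A sketch assuming Twisted is installed:
import logging

from twisted.python.failure import Failure

logger = logging.getLogger(__name__)


def log_captured_failure(f: Failure, msg: str) -> None:
    # Like logger.exception(), but for a captured Failure rather than the
    # exception currently being handled.
    logger.error(msg, exc_info=(f.type, f.value, f.getTracebackObject()))


try:
    1 / 0
except ZeroDivisionError:
    log_captured_failure(Failure(), "division failed")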
# (actually, you probably want to use yieldable_gather_results anyway) failure.trap(defer.FirstError) - return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations + return failure.value.subFailure P = ParamSpec("P") @@ -178,7 +178,7 @@ def log_failure( """ logger.error( - msg, exc_info=(failure.type, failure.value, failure.getTracebackObject()) # type: ignore[arg-type] + msg, exc_info=(failure.type, failure.value, failure.getTracebackObject()) ) if not consumeErrors: diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 01e3cd46f6..4041e49e71 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -138,7 +138,7 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): for observer in observers: # This is a little bit of magic to correctly propagate stack # traces when we `await` on one of the observer deferreds. - f.value.__failure__ = f # type: ignore[union-attr] + f.value.__failure__ = f try: observer.errback(f) except Exception as e: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 452d5d04c1..ed0da17227 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -93,10 +93,8 @@ VT = TypeVar("VT") # a general type var, distinct from either KT or VT T = TypeVar("T") -P = TypeVar("P") - -class _TimedListNode(ListNode[P]): +class _TimedListNode(ListNode[T]): """A `ListNode` that tracks last access time.""" __slots__ = ["last_access_ts_secs"] @@ -821,7 +819,7 @@ class AsyncLruCache(Generic[KT, VT]): utilize external cache systems that require await behaviour to be created. """ - def __init__(self, *args, **kwargs): # type: ignore + def __init__(self, *args: Any, **kwargs: Any): self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs) async def get( diff --git a/tests/server.py b/tests/server.py index 7296f0a552..a12c3e3b9a 100644 --- a/tests/server.py +++ b/tests/server.py @@ -642,7 +642,7 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None: pool.runWithConnection = runWithConnection # type: ignore[assignment] pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool - pool.threadpool = ThreadPool(clock._reactor) # type: ignore[assignment] + pool.threadpool = ThreadPool(clock._reactor) pool.running = True # We've just changed the Databases to run DB transactions on the same -- cgit 1.5.1 From daf3a679089770e00d1b70d8ed2f91ab108b73e3 Mon Sep 17 00:00:00 2001 From: Gabriel Féron Date: Wed, 31 May 2023 15:18:37 +0200 Subject: Add get_canonical_room_alias to module API (#15450) Co-authored-by: Boxdot --- changelog.d/15450.feature | 1 + synapse/module_api/__init__.py | 27 +++++++++++++++++++++++++++ synapse/storage/controllers/state.py | 2 +- 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15450.feature diff --git a/changelog.d/15450.feature b/changelog.d/15450.feature new file mode 100644 index 0000000000..2102381143 --- /dev/null +++ b/changelog.d/15450.feature @@ -0,0 +1 @@ +Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. 
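# --- Aside: what the module-API method added below enables, as a fragment of
# a hypothetical module. Only get_canonical_room_alias (Synapse v1.86.0+)
# comes from this patch; the RoomLabeller scaffolding is invented for
# illustration.
from synapse.module_api import ModuleApi
from synapse.types import RoomID


class RoomLabeller:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api

    async def describe_room(self, room_id: str) -> str:
        # The method returns None both for unknown rooms and for rooms with
        # no canonical alias, so fall back to the opaque room ID.
        alias = await self._api.get_canonical_room_alias(RoomID.from_string(room_id))
        return alias.to_string() if alias is not None else room_id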
\ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 134bd2e620..a8d6224a45 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -122,6 +122,7 @@ from synapse.types import ( JsonMapping, Requester, RoomAlias, + RoomID, StateMap, UserID, UserInfo, @@ -1570,6 +1571,32 @@ class ModuleApi: start_timestamp, end_timestamp ) + async def get_canonical_room_alias(self, room_id: RoomID) -> Optional[RoomAlias]: + """ + Retrieve the given room's current canonical alias. + + A room may declare an alias as "canonical", meaning that it is the + preferred alias to use when referring to the room. This function + retrieves that alias from the room's state. + + Added in Synapse v1.86.0. + + Args: + room_id: The Room ID to find the alias of. + + Returns: + None if the room ID does not exist, or if the room exists but has no canonical alias. + Otherwise, the parsed room alias. + """ + room_alias_str = ( + await self._storage_controllers.state.get_canonical_alias_for_room( + room_id.to_string() + ) + ) + if room_alias_str: + return RoomAlias.from_string(room_alias_str) + return None + async def lookup_room_alias(self, room_alias: str) -> Tuple[str, List[str]]: """ Get the room ID associated with a room alias. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 7089b0a1d8..233df7cce2 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -485,7 +485,7 @@ class StateStorageController: if not event: return None - return event.content.get("canonical_alias") + return event.content.get("alias") @trace @tag_args -- cgit 1.5.1 From 11e15d79b8a0af593fd9467e0cc7f8a9dfcb6c4f Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 31 May 2023 13:59:56 +0000 Subject: Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. (#15693) * Add indices required to efficiently validate new foreign key constraints on stream_ordering * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15693.bugfix | 1 + synapse/storage/databases/state/bg_updates.py | 31 ++++++++++++++++++++++ .../77/14bg_indices_event_stream_ordering.sql | 20 ++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 changelog.d/15693.bugfix create mode 100644 synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql diff --git a/changelog.d/15693.bugfix b/changelog.d/15693.bugfix new file mode 100644 index 0000000000..d0325de007 --- /dev/null +++ b/changelog.d/15693.bugfix @@ -0,0 +1 @@ +Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. 
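# --- Aside: the mechanism the fix below relies on, condensed. Creating an
# index directly in a schema delta can lock a large table during the upgrade,
# so Synapse registers it as a background update instead: the delta SQL merely
# inserts rows into background_updates, and the store registers matching
# handlers that build each index in the background. A trimmed sketch of one
# such registration (the patch below adds three):
from synapse.storage.background_updates import BackgroundUpdater


def register_purge_index(updates: BackgroundUpdater) -> None:
    # Indexes event_stream_ordering so the foreign-key constraint can be
    # validated cheaply when purged events are deleted.
    updates.register_background_index_update(
        "room_memberships_stream_ordering_idx",
        index_name="room_memberships_stream_ordering_idx",
        table="room_memberships",
        columns=["event_stream_ordering"],
    )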
\ No newline at end of file diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 86eb1a8a08..5b8ba436d4 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -261,6 +261,16 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" STATE_GROUP_EDGES_UNIQUE_INDEX_UPDATE_NAME = "state_group_edges_unique_idx" + CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "current_state_events_stream_ordering_idx" + ) + ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "room_memberships_stream_ordering_idx" + ) + LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "local_current_membership_stream_ordering_idx" + ) + def __init__( self, database: DatabasePool, @@ -297,6 +307,27 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): replaces_index="state_group_edges_idx", ) + # These indices are needed to validate the foreign key constraint + # when events are deleted. + self.db_pool.updates.register_background_index_update( + self.CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="current_state_events_stream_ordering_idx", + table="current_state_events", + columns=["event_stream_ordering"], + ) + self.db_pool.updates.register_background_index_update( + self.ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="room_memberships_stream_ordering_idx", + table="room_memberships", + columns=["event_stream_ordering"], + ) + self.db_pool.updates.register_background_index_update( + self.LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="local_current_membership_stream_ordering_idx", + table="local_current_membership", + columns=["event_stream_ordering"], + ) + async def _background_deduplicate_state( self, progress: dict, batch_size: int ) -> int: diff --git a/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql new file mode 100644 index 0000000000..ec8cd522ec --- /dev/null +++ b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql @@ -0,0 +1,20 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) + VALUES + (7714, 'current_state_events_stream_ordering_idx', '{}'), + (7714, 'local_current_membership_stream_ordering_idx', '{}'), + (7714, 'room_memberships_stream_ordering_idx', '{}'); -- cgit 1.5.1 From 874378c0523bb82314434f1f0f2c5e1462a34a5b Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 31 May 2023 10:13:31 -0500 Subject: Docker fully qualified image names (#15689) * Fully qualified docker image names for the main Dockerfile and Complement related. * Fully qualified docker image names for Dockerfiles associated with building Debian release artifacts. 
This one is harder and is separate from the other commit in case it wasn't correct or was unwanted. I decided to do the expansion on the docker images in the Dockerfile itself, instead of the various source places that build which distribution that is selected, as it would have been more invasive with the scripts breaking up the string for tagging and such. This one is untested. * Changelog * Update docker/Dockerfile-workers * Update docker/complement/Dockerfile --------- Co-authored-by: reivilibre --- .github/workflows/release-artifacts.yml | 1 + changelog.d/15689.misc | 1 + docker/Dockerfile | 6 +++--- docker/Dockerfile-dhvirtualenv | 4 ++-- docker/Dockerfile-workers | 4 ++-- docker/complement/Dockerfile | 5 +++-- docker/editable.Dockerfile | 2 +- scripts-dev/build_debian_packages.py | 2 ++ 8 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 changelog.d/15689.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index ebd7d298a9..0981200401 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -34,6 +34,7 @@ jobs: - id: set-distros run: | # if we're running from a tag, get the full list of distros; otherwise just use debian:sid + # NOTE: inside the actual Dockerfile-dhvirtualenv, the image name is expanded into its full image path dists='["debian:sid"]' if [[ $GITHUB_REF == refs/tags/* ]]; then dists=$(scripts-dev/build_debian_packages.py --show-dists-json) diff --git a/changelog.d/15689.misc b/changelog.d/15689.misc new file mode 100644 index 0000000000..4262cc9515 --- /dev/null +++ b/changelog.d/15689.misc @@ -0,0 +1 @@ +Add fully qualified docker image names to Dockerfiles. diff --git a/docker/Dockerfile b/docker/Dockerfile index 6107dced43..12cff84131 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11 ### # We hardcode the use of Debian bullseye here because this could change upstream # and other Dockerfiles used for testing are expecting bullseye. -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements # RUN --mount is specific to buildkit and is documented at # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. @@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### ### Stage 1: builder ### -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder # install the OS build deps RUN \ @@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \ ### Stage 2: runtime ### -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md' diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 2013732422..861129ebc2 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -24,7 +24,7 @@ ARG distro="" # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but # it's not obviously easier to use that than to build our own.) 
-FROM ${distro} as builder +FROM docker.io/library/${distro} as builder RUN apt-get update -qq -o Acquire::Languages=none RUN env DEBIAN_FRONTEND=noninteractive apt-get install \ @@ -55,7 +55,7 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b ### ### Stage 1 ### -FROM ${distro} +FROM docker.io/library/${distro} # Get the distro we want to pull from as a dynamic build variable # (We need to define it in each build stage) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index faf7f2cef8..adb9a725e3 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION # target image. For repeated rebuilds, this is much faster than apt installing # each time. -FROM debian:bullseye-slim AS deps_base +FROM docker.io/library/debian:bullseye-slim AS deps_base RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ @@ -21,7 +21,7 @@ FROM debian:bullseye-slim AS deps_base # which makes it much easier to copy (but we need to make sure we use an image # based on the same debian version as the synapse image, to make sure we get # the expected version of libc. -FROM redis:6-bullseye AS redis_base +FROM docker.io/library/redis:6-bullseye AS redis_base # now build the final image, based on the the regular Synapse docker image FROM $FROM diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index be1aa1c55e..5103068a49 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -7,6 +7,7 @@ # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse ARG SYNAPSE_VERSION=latest +# This is an intermediate image, to be built locally (not pulled from a registry). ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION FROM $FROM @@ -19,8 +20,8 @@ FROM $FROM # the same debian version as Synapse's docker image (so the versions of the # shared libraries match). RUN adduser --system --uid 999 postgres --home /var/lib/postgresql - COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql - COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql + COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql + COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" ENV PGDATA=/var/lib/postgresql/data diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile index 0e8cf2e712..c53ce1c718 100644 --- a/docker/editable.Dockerfile +++ b/docker/editable.Dockerfile @@ -10,7 +10,7 @@ ARG PYTHON_VERSION=3.9 ### # We hardcode the use of Debian bullseye here because this could change upstream # and other Dockerfiles used for testing are expecting bullseye. 
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye # Install Rust and other dependencies (stolen from normal Dockerfile) # install the OS build deps diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index ede7665011..4c9f134ddd 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -20,6 +20,8 @@ from concurrent.futures import ThreadPoolExecutor from types import FrameType from typing import Collection, Optional, Sequence, Set +# These are expanded inside the dockerfile to be a fully qualified image name. +# e.g. docker.io/library/debian:bullseye DISTS = ( "debian:buster", # oldstable: EOL 2022-08 "debian:bullseye", -- cgit 1.5.1 From 6f18812bb044a2959fdc9881c328578adb7b33f2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 31 May 2023 13:06:57 -0400 Subject: Add stubs package for lxml. (#15697) The stubs have some issues so this has some generous cast and ignores in it, but it is better than not having stubs. Note that confusing that Element is a function which creates _Element instances (and similarly for Comment). --- changelog.d/15697.misc | 1 + mypy.ini | 3 -- poetry.lock | 25 +++++++++--- pyproject.toml | 1 + synapse/media/oembed.py | 32 +++++++++------ synapse/media/preview_html.py | 79 +++++++++++++++++++++++++----------- tests/media/test_html_preview.py | 18 +++++++- tests/media/test_oembed.py | 2 +- tests/media/test_url_previewer.py | 2 +- tests/rest/media/test_url_preview.py | 2 +- 10 files changed, 117 insertions(+), 48 deletions(-) create mode 100644 changelog.d/15697.misc diff --git a/changelog.d/15697.misc b/changelog.d/15697.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15697.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 56cd1d560e..1038b7d8c7 100644 --- a/mypy.ini +++ b/mypy.ini @@ -60,9 +60,6 @@ ignore_missing_imports = True [mypy-ijson.*] ignore_missing_imports = True -[mypy-lxml] -ignore_missing_imports = True - # https://github.com/msgpack/msgpack-python/issues/448 [mypy-msgpack] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index 0879e64cf1..d8964f5719 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -1215,6 +1215,21 @@ html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=0.29.7)"] +[[package]] +name = "lxml-stubs" +version = "0.4.0" +description = "Type annotations for the lxml package" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "lxml-stubs-0.4.0.tar.gz", hash = "sha256:184877b42127256abc2b932ba8bd0ab5ea80bd0b0fee618d16daa40e0b71abee"}, + {file = "lxml_stubs-0.4.0-py3-none-any.whl", hash = "sha256:3b381e9e82397c64ea3cc4d6f79d1255d015f7b114806d4826218805c10ec003"}, +] + +[package.extras] +test = ["coverage[toml] (==5.2)", "pytest (>=6.0.0)", "pytest-mypy-plugins (==1.9.3)"] + [[package]] name = "markdown-it-py" version = "2.2.0" @@ -3409,22 +3424,22 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"] +all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["txredisapi", "hiredis"] +redis = ["hiredis", "txredisapi"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["parameterized", "idna"] +test = ["idna", "parameterized"] url-preview = ["lxml"] user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "ef3a16dd66177f7141239e1a2d3e07cc14c08f1e4e0c5127184d022bc062da52" +content-hash = "7ad11e62a675e09444cf33ca2de3216fc4efc5874a2575e54d95d577a52439d3" diff --git a/pyproject.toml b/pyproject.toml index 7227bc7523..4476f57ca7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -314,6 +314,7 @@ black = ">=22.3.0" ruff = "0.0.265" # Typechecking +lxml-stubs = ">=0.4.0" mypy = "*" mypy-zope = "*" types-bleach = ">=4.1.0" diff --git a/synapse/media/oembed.py b/synapse/media/oembed.py index c0eaf04be5..5ad9eec80b 100644 --- a/synapse/media/oembed.py +++ b/synapse/media/oembed.py @@ -14,7 +14,7 @@ import html import logging import urllib.parse -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, List, Optional, cast import attr @@ -98,7 +98,7 @@ class OEmbedProvider: # No match. return None - def autodiscover_from_html(self, tree: "etree.Element") -> Optional[str]: + def autodiscover_from_html(self, tree: "etree._Element") -> Optional[str]: """ Search an HTML document for oEmbed autodiscovery information. @@ -109,18 +109,22 @@ class OEmbedProvider: The URL to use for oEmbed information, or None if no URL was found. """ # Search for link elements with the proper rel and type attributes. - for tag in tree.xpath( - "//link[@rel='alternate'][@type='application/json+oembed']" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + for tag in cast( + List["etree._Element"], + tree.xpath("//link[@rel='alternate'][@type='application/json+oembed']"), ): if "href" in tag.attrib: - return tag.attrib["href"] + return cast(str, tag.attrib["href"]) # Some providers (e.g. Flickr) use alternative instead of alternate. 
-        for tag in tree.xpath(
-            "//link[@rel='alternative'][@type='application/json+oembed']"
+        # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+        for tag in cast(
+            List["etree._Element"],
+            tree.xpath("//link[@rel='alternative'][@type='application/json+oembed']"),
         ):
             if "href" in tag.attrib:
-                return tag.attrib["href"]
+                return cast(str, tag.attrib["href"])
 
         return None
 
@@ -212,11 +216,12 @@
         return OEmbedResult(open_graph_response, author_name, cache_age)
 
 
-def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]:
+def _fetch_urls(tree: "etree._Element", tag_name: str) -> List[str]:
     results = []
-    for tag in tree.xpath("//*/" + tag_name):
+    # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+    for tag in cast(List["etree._Element"], tree.xpath("//*/" + tag_name)):
         if "src" in tag.attrib:
-            results.append(tag.attrib["src"])
+            results.append(cast(str, tag.attrib["src"]))
     return results
 
 
@@ -244,11 +249,12 @@ def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) ->
     parser = etree.HTMLParser(recover=True, encoding="utf-8")
 
     # Attempt to parse the body. If this fails, log and return no metadata.
-    tree = etree.fromstring(html_body, parser)
+    # TODO: the develop branch of lxml-stubs has this correct.
+    tree = etree.fromstring(html_body, parser)  # type: ignore[arg-type]
 
     # The data was successfully parsed, but no tree was found.
     if tree is None:
-        return
+        return  # type: ignore[unreachable]
 
     # Attempt to find interesting URLs (images, videos, embeds).
     if "og:image" not in open_graph_response:
diff --git a/synapse/media/preview_html.py b/synapse/media/preview_html.py
index 516d0434f0..1bc7ccb7f3 100644
--- a/synapse/media/preview_html.py
+++ b/synapse/media/preview_html.py
@@ -24,6 +24,7 @@ from typing import (
     Optional,
     Set,
     Union,
+    cast,
 )
 
 if TYPE_CHECKING:
@@ -115,7 +116,7 @@ def _get_html_media_encodings(
 
 def decode_body(
     body: bytes, uri: str, content_type: Optional[str] = None
-) -> Optional["etree.Element"]:
+) -> Optional["etree._Element"]:
     """
     This uses lxml to parse the HTML document.
 
@@ -152,11 +153,12 @@ def decode_body(
 
     # Attempt to parse the body. Returns None if the body was successfully
     # parsed, but no tree was found.
-    return etree.fromstring(body, parser)
+    # TODO: the develop branch of lxml-stubs has this correct.
+    return etree.fromstring(body, parser)  # type: ignore[arg-type]
 
 
 def _get_meta_tags(
-    tree: "etree.Element",
+    tree: "etree._Element",
     property: str,
     prefix: str,
     property_mapper: Optional[Callable[[str], Optional[str]]] = None,
@@ -175,9 +177,15 @@ def _get_meta_tags(
     Returns:
         A map of tag name to value.
     """
+    # This actually returns Dict[str, str], but the caller sets this as a variable
+    # which is Dict[str, Optional[str]].
     results: Dict[str, Optional[str]] = {}
-    for tag in tree.xpath(
-        f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
+    # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+    for tag in cast(
+        List["etree._Element"],
+        tree.xpath(
+            f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
+        ),
     ):
         # if we've got more than 50 tags, someone is taking the piss
         if len(results) >= 50:
@@ -187,14 +195,15 @@ def _get_meta_tags(
             )
             return {}
 
-        key = tag.attrib[property]
+        key = cast(str, tag.attrib[property])
         if property_mapper:
-            key = property_mapper(key)
+            new_key = property_mapper(key)
             # None is a special value used to ignore a value.
- if key is None: + if new_key is None: continue + key = new_key - results[key] = tag.attrib["content"] + results[key] = cast(str, tag.attrib["content"]) return results @@ -219,7 +228,7 @@ def _map_twitter_to_open_graph(key: str) -> Optional[str]: return "og" + key[7:] -def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -276,24 +285,36 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: if "og:title" not in og: # Attempt to find a title from the title tag, or the biggest header on the page. - title = tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + title = cast( + List["etree._ElementUnicodeResult"], + tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()"), + ) if title: og["og:title"] = title[0].strip() else: og["og:title"] = None if "og:image" not in og: - meta_image = tree.xpath( - "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + meta_image = cast( + List["etree._ElementUnicodeResult"], + tree.xpath( + "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]" + ), ) # If a meta image is found, use it. if meta_image: og["og:image"] = meta_image[0] else: # Try to find images which are larger than 10px by 10px. + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. # # TODO: consider inlined CSS styles as well as width & height attribs - images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") + images = cast( + List["etree._Element"], + tree.xpath("//img[@src][number(@width)>10][number(@height)>10]"), + ) images = sorted( images, key=lambda i: ( @@ -302,20 +323,29 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: ) # If no images were found, try to find *any* images. if not images: - images = tree.xpath("//img[@src][1]") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + images = cast(List["etree._Element"], tree.xpath("//img[@src][1]")) if images: - og["og:image"] = images[0].attrib["src"] + og["og:image"] = cast(str, images[0].attrib["src"]) # Finally, fallback to the favicon if nothing else. else: - favicons = tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + favicons = cast( + List["etree._ElementUnicodeResult"], + tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]"), + ) if favicons: og["og:image"] = favicons[0] if "og:description" not in og: # Check the first meta description tag for content. - meta_description = tree.xpath( - "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + meta_description = cast( + List["etree._ElementUnicodeResult"], + tree.xpath( + "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]" + ), ) # If a meta description is found with content, use it. 
if meta_description: @@ -332,7 +362,7 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: return og -def parse_html_description(tree: "etree.Element") -> Optional[str]: +def parse_html_description(tree: "etree._Element") -> Optional[str]: """ Calculate a text description based on an HTML document. @@ -368,6 +398,9 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]: "canvas", "img", "picture", + # etree.Comment is a function which creates an etree._Comment element. + # The "tag" attribute of an etree._Comment instance is confusingly the + # etree.Comment function instead of a string. etree.Comment, } @@ -381,8 +414,8 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]: def _iterate_over_text( - tree: Optional["etree.Element"], - tags_to_ignore: Set[Union[str, "etree.Comment"]], + tree: Optional["etree._Element"], + tags_to_ignore: Set[object], stack_limit: int = 1024, ) -> Generator[str, None, None]: """Iterate over the tree returning text nodes in a depth first fashion, @@ -402,7 +435,7 @@ def _iterate_over_text( # This is a stack whose items are elements to iterate over *or* strings # to be returned. - elements: List[Union[str, "etree.Element"]] = [tree] + elements: List[Union[str, "etree._Element"]] = [tree] while elements: el = elements.pop() diff --git a/tests/media/test_html_preview.py b/tests/media/test_html_preview.py index e7da75db3e..ea84bb3d3d 100644 --- a/tests/media/test_html_preview.py +++ b/tests/media/test_html_preview.py @@ -24,7 +24,7 @@ from tests import unittest try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class SummarizeTestCase(unittest.TestCase): @@ -160,6 +160,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -176,6 +177,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -195,6 +197,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( @@ -217,6 +220,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -231,6 +235,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -246,6 +251,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Title"}) @@ -261,6 +267,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -281,6 +288,7 
@@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Finally!"}) @@ -296,6 +304,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -324,6 +333,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): FooSome text. """.strip() tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -338,6 +348,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html", "invalid-encoding") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -353,6 +364,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) @@ -367,6 +379,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) @@ -380,6 +393,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -401,6 +415,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -419,6 +434,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): with a cheeky SVG and some tail text """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py index c8bf8421da..3bc19cb1cc 100644 --- a/tests/media/test_oembed.py +++ b/tests/media/test_oembed.py @@ -28,7 +28,7 @@ from tests.unittest import HomeserverTestCase try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class OEmbedTests(HomeserverTestCase): diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py index 3c4c7d6765..46ecde5344 100644 --- a/tests/media/test_url_previewer.py +++ b/tests/media/test_url_previewer.py @@ -24,7 +24,7 @@ from tests.unittest import override_config try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class URLPreviewTests(unittest.HomeserverTestCase): diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 170fb0534a..05d5e39cab 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -40,7 +40,7 @@ from tests.test_utils import SMALL_PNG try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class URLPreviewTests(unittest.HomeserverTestCase): -- cgit 1.5.1 From 
0b5f64ff09d44338d2514cbdba80aa4a4f11d1aa Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Wed, 31 May 2023 14:35:49 -0500
Subject: Add Synapse version deploy annotations to Grafana dashboard (#15674)

Fix https://github.com/matrix-org/synapse/issues/15662

This manifests as purple lines that show up on all time series panels, which
you can hover over to see what version was deployed.

Also added a new "Deployed Synapse versions over time" panel where the color
block changes with each version, and mixed this color block into the "Up"
time series panel.

To get the Grafana dashboard JSON to copy here: use the **Share** icon at the
top -> **Export** -> check the **Export for sharing externally** option ->
**View JSON** or **Save to file**
---
 changelog.d/15674.feature    |    1 +
 contrib/grafana/synapse.json | 1240 +++++++++++++++++++++++++++++++++---------
 2 files changed, 981 insertions(+), 260 deletions(-)
 create mode 100644 changelog.d/15674.feature

diff --git a/changelog.d/15674.feature b/changelog.d/15674.feature
new file mode 100644
index 0000000000..68cf207dc0
--- /dev/null
+++ b/changelog.d/15674.feature
@@ -0,0 +1 @@
+Add Synapse version deploy annotations to Grafana dashboard, which enables easy correlation between behavior changes witnessed in a graph and a certain Synapse version, helping to nail down regressions.
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index f09cd6f87c..f3253b32b9 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -56,6 +56,17 @@
       "name": "Annotations & Alerts",
       "showIn": 0,
       "type": "dashboard"
+      },
+      {
+        "datasource": {
+          "type": "prometheus",
+          "uid": "${DS_PROMETHEUS}"
+        },
+        "enable": true,
+        "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+        "iconColor": "purple",
+        "name": "deploys",
+        "titleFormat": "Deployed {{version}}"
       }
     ]
   },
@@ -670,6 +681,95 @@
         "align": false
       }
     },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "axisSoftMax": 1,
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "lineInterpolation": "linear",
+            "lineWidth": 10,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "never",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": [],
+          "thresholds": {
+            "mode": "absolute",
+            "steps": [
+              {
+                "color": "green",
+                "value": null
+              }
+            ]
+          }
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 0,
+        "y": 19
+      },
+      "id": 245,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1",
+          "legendFormat": "version {{version}}",
+          "range": true,
+          "refId": "deployed_synapse_versions"
+        }
+      ],
+      "title": "Deployed Synapse versions over time",
+      "type": "timeseries"
+    },
    {
"aliasColors": {}, "bars": false, @@ -809,6 +909,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -874,11 +975,13 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} system ", "metric": "", + "range": true, "refId": "B", "step": 20 }, @@ -1328,6 +1431,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { @@ -1368,7 +1472,15 @@ "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "$$hashKey": "object:116", + "alias": "/^version .*/", + "lines": true, + "linewidth": 6, + "points": false + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, @@ -1377,11 +1489,25 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1", + "hide": false, + "legendFormat": "version {{version}}", + "range": true, + "refId": "deployed_synapse_versions" } ], "thresholds": [], @@ -1788,7 +1914,7 @@ "h": 9, "w": 12, "x": 0, - "y": 56 + "y": 28 }, "heatmap": {}, "hideZeroBuckets": false, @@ -1890,7 +2016,7 @@ "h": 9, "w": 12, "x": 12, - "y": 56 + "y": 28 }, "hiddenSeries": false, "id": 33, @@ -1982,7 +2108,7 @@ "h": 7, "w": 12, "x": 0, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 40, @@ -2070,7 +2196,7 @@ "h": 7, "w": 12, "x": 12, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 46, @@ -2161,7 +2287,7 @@ "h": 7, "w": 12, "x": 0, - "y": 72 + "y": 44 }, "hiddenSeries": false, "id": 44, @@ -2253,7 +2379,7 @@ "h": 7, "w": 12, "x": 12, - "y": 72 + "y": 44 }, "hiddenSeries": false, "id": 45, @@ -2354,7 +2480,7 @@ "h": 9, "w": 12, "x": 0, - "y": 79 + "y": 51 }, "hiddenSeries": false, "id": 118, @@ -2547,7 +2673,7 @@ "h": 9, "w": 12, "x": 12, - "y": 79 + "y": 51 }, "id": 222, "options": { @@ -2646,7 +2772,7 @@ "h": 8, "w": 12, "x": 0, - "y": 57 + "y": 29 }, "hiddenSeries": false, "id": 4, @@ -2768,7 +2894,7 @@ "h": 8, "w": 12, "x": 12, - "y": 57 + "y": 29 }, "hiddenSeries": false, "id": 32, @@ -2867,7 +2993,7 @@ "h": 8, "w": 12, "x": 0, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 139, @@ -2989,7 +3115,7 @@ "h": 8, "w": 12, "x": 12, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 52, @@ -3111,7 +3237,7 @@ "h": 8, "w": 12, "x": 0, - "y": 73 + "y": 45 }, "hiddenSeries": false, "id": 7, @@ -3212,7 +3338,7 @@ "h": 8, "w": 12, "x": 12, - "y": 73 + "y": 45 }, "hiddenSeries": false, "id": 47, @@ -3310,7 +3436,7 @@ "h": 9, "w": 12, "x": 0, - "y": 81 + "y": 53 }, "hiddenSeries": false, "id": 103, @@ -3445,7 +3571,7 @@ "h": 9, "w": 12, "x": 0, - "y": 5 + "y": 30 }, "hiddenSeries": false, "id": 99, @@ -3467,7 +3593,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -3538,7 +3664,7 @@ "h": 9, "w": 12, "x": 12, - "y": 5 + "y": 30 }, "hiddenSeries": false, "id": 101, @@ -3560,7 +3686,7 @@ }, 
"paceLength": 10, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -3631,7 +3757,7 @@ "h": 8, "w": 12, "x": 0, - "y": 14 + "y": 39 }, "hiddenSeries": false, "id": 138, @@ -3651,7 +3777,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -3746,7 +3872,7 @@ "h": 9, "w": 12, "x": 0, - "y": 59 + "y": 31 }, "hiddenSeries": false, "id": 79, @@ -3846,7 +3972,7 @@ "h": 9, "w": 12, "x": 12, - "y": 59 + "y": 31 }, "hiddenSeries": false, "id": 83, @@ -3934,6 +4060,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { @@ -3948,7 +4075,7 @@ "h": 9, "w": 12, "x": 0, - "y": 68 + "y": 40 }, "hiddenSeries": false, "id": 109, @@ -3983,11 +4110,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))", + "editorMode": "code", + "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations_count_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "pdus", + "range": true, "refId": "A" }, { @@ -4052,7 +4181,7 @@ "h": 9, "w": 12, "x": 12, - "y": 68 + "y": 40 }, "hiddenSeries": false, "id": 111, @@ -4130,119 +4259,363 @@ } }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, - "description": "The number of events in the in-memory queues ", + "description": "Triangular growth may indicate a problem with federation sending from the remote host --- but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains", "fieldConfig": { "defaults": { - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 60, + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 60 + } + ] + }, + "unit": "m" }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "libera.chat " + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 77 - }, - "hiddenSeries": false, - "id": 142, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false + "y": 49 }, - "lines": true, - "linewidth": 1, - 
"nullPointMode": "null", + "id": 243, "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "9.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "interval": "", - "legendFormat": "pending PDUs {{job}}-{{index}}", + "exemplar": false, + "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_received_pdu_time[10m]))) / 60", + "instant": false, + "legendFormat": "{{server_name}} ", "range": true, "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "interval": "", - "legendFormat": "pending EDUs {{job}}-{{index}}", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "In-memory federation transmission queues", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:547", - "format": "short", - "label": "events", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:548", - "format": "short", - "label": "", - "logBase": 1, - "show": true } ], - "yaxis": { - "align": false - } + "title": "Age of last PDU received from nominated hosts", + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Triangular growth may indicate a problem with federation senders on the monitored instance---but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 60, + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 60 + } + ] + }, + "unit": "m" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "libera.chat" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + 
} + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 49 + }, + "id": 241, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_sent_pdu_time[10m]))) / 60", + "instant": false, + "legendFormat": "{{server_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Age of last PDU sent to nominated hosts", + "type": "timeseries" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "description": "The number of events in the in-memory queues ", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 142, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "pending PDUs {{job}}-{{index}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "pending EDUs {{job}}-{{index}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "In-memory federation transmission queues", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:547", + "format": "short", + "label": "events", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:548", + "format": "short", + "label": "", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "uid": "$datasource" }, @@ -4259,7 +4632,7 @@ "h": 9, "w": 12, "x": 12, - "y": 77 + "y": 57 }, "hiddenSeries": false, "id": 140, @@ -4428,7 +4801,7 @@ "h": 9, "w": 12, "x": 0, - "y": 85 + "y": 66 }, "heatmap": {}, "hideZeroBuckets": false, @@ -4533,7 +4906,7 @@ "h": 9, "w": 12, "x": 12, - "y": 86 + "y": 66 }, "hiddenSeries": false, "id": 162, @@ -4745,11 +5118,26 @@ "datasource": { "uid": "$datasource" }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 94 + "y": 75 }, "heatmap": {}, 
"hideZeroBuckets": false, @@ -4759,6 +5147,48 @@ "show": false }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": -1, + "cellValues": { + "decimals": 2 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "min": 0, + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -4798,6 +5228,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -4815,7 +5246,7 @@ "h": 9, "w": 12, "x": 12, - "y": 95 + "y": 75 }, "hiddenSeries": false, "id": 203, @@ -4837,7 +5268,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -4850,11 +5281,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", + "editorMode": "code", + "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "rss {{index}}", + "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A", "step": 4 } @@ -4899,6 +5332,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -4916,7 +5350,7 @@ "h": 9, "w": 12, "x": 0, - "y": 103 + "y": 84 }, "hiddenSeries": false, "id": 202, @@ -4938,7 +5372,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -4951,11 +5385,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", + "editorMode": "code", + "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "rss {{index}}", + "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A", "step": 4 } @@ -5009,7 +5445,7 @@ "h": 8, "w": 12, "x": 12, - "y": 104 + "y": 84 }, "hiddenSeries": false, "id": 205, @@ -5029,7 +5465,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -5115,6 +5551,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5162,7 +5600,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1 + "y": 154 }, "id": 239, "options": { @@ -5201,6 +5639,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5248,7 +5688,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1 + "y": 154 }, "id": 235, "options": { @@ -5288,6 +5728,8 @@ "mode": "palette-classic" }, "custom": { 
+ "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5335,7 +5777,7 @@ "h": 8, "w": 12, "x": 0, - "y": 9 + "y": 162 }, "id": 237, "options": { @@ -5376,6 +5818,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5423,7 +5867,7 @@ "h": 8, "w": 12, "x": 12, - "y": 9 + "y": 162 }, "id": 233, "options": { @@ -5474,7 +5918,7 @@ "h": 8, "w": 12, "x": 0, - "y": 17 + "y": 170 }, "hiddenSeries": false, "id": 229, @@ -5497,7 +5941,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -5709,6 +6153,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5773,7 +6219,7 @@ "h": 8, "w": 12, "x": 12, - "y": 17 + "y": 170 }, "id": 231, "options": { @@ -5832,65 +6278,96 @@ "id": 60, "panels": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { "defaults": { - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "hertz" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 32 + "y": 155 }, - "hiddenSeries": false, "id": 51, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, "links": [], - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "8.4.3", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "9.2.2", "targets": [ { "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "interval": "", "intervalFactor": 2, - "legendFormat": "processed {{job}}", + "legendFormat": "processed {{job}}-{{index}}", + "range": true, "refId": "A", "step": 20 }, @@ -5898,43 +6375,18 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": 
"rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "intervalFactor": 2, - "legendFormat": "failed {{job}}", + "legendFormat": "failed {{job}}-{{index}}", + "range": true, "refId": "B", "step": 20 } ], - "thresholds": [], - "timeRegions": [], "title": "HTTP Push rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "aliasColors": {}, @@ -5957,7 +6409,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 155 }, "hiddenSeries": false, "id": 134, @@ -5978,7 +6430,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -7344,7 +7796,7 @@ "h": 13, "w": 12, "x": 0, - "y": 35 + "y": 158 }, "hiddenSeries": false, "id": 12, @@ -7367,7 +7819,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7442,7 +7894,7 @@ "h": 13, "w": 12, "x": 12, - "y": 35 + "y": 158 }, "hiddenSeries": false, "id": 26, @@ -7465,7 +7917,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7541,7 +7993,7 @@ "h": 13, "w": 12, "x": 0, - "y": 48 + "y": 171 }, "hiddenSeries": false, "id": 13, @@ -7564,7 +8016,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7645,7 +8097,7 @@ "h": 13, "w": 12, "x": 12, - "y": 48 + "y": 171 }, "hiddenSeries": false, "id": 27, @@ -7668,7 +8120,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7743,7 +8195,7 @@ "h": 13, "w": 12, "x": 0, - "y": 61 + "y": 184 }, "hiddenSeries": false, "id": 28, @@ -7765,7 +8217,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7840,7 +8292,7 @@ "h": 13, "w": 12, "x": 12, - "y": 61 + "y": 184 }, "hiddenSeries": false, "id": 25, @@ -7862,7 +8314,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7930,7 +8382,7 @@ "h": 15, "w": 12, "x": 0, - "y": 74 + "y": 197 }, "hiddenSeries": false, "id": 154, @@ -7951,7 +8403,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -9363,7 +9815,7 @@ "h": 7, "w": 12, "x": 0, - "y": 40 + "y": 162 }, "hiddenSeries": false, "id": 43, @@ -9385,7 +9837,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -9449,6 +9901,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", 
"axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -9498,7 +9952,7 @@ "h": 7, "w": 12, "x": 12, - "y": 40 + "y": 162 }, "id": 41, "links": [], @@ -9545,6 +9999,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -9595,7 +10051,7 @@ "h": 7, "w": 12, "x": 0, - "y": 47 + "y": 169 }, "id": 42, "links": [], @@ -9642,6 +10098,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "axisSoftMin": 1, @@ -9693,7 +10151,7 @@ "h": 7, "w": 12, "x": 12, - "y": 47 + "y": 169 }, "id": 220, "links": [], @@ -9751,7 +10209,7 @@ "h": 7, "w": 12, "x": 0, - "y": 54 + "y": 176 }, "hiddenSeries": false, "id": 144, @@ -9771,7 +10229,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -9844,7 +10302,7 @@ "h": 7, "w": 12, "x": 12, - "y": 54 + "y": 176 }, "hiddenSeries": false, "id": 115, @@ -9866,7 +10324,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -9938,7 +10396,7 @@ "h": 7, "w": 12, "x": 0, - "y": 61 + "y": 183 }, "hiddenSeries": false, "id": 113, @@ -9960,7 +10418,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10058,7 +10516,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10069,7 +10526,7 @@ "h": 9, "w": 12, "x": 0, - "y": 41 + "y": 163 }, "hiddenSeries": false, "id": 67, @@ -10091,7 +10548,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10154,7 +10611,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10165,7 +10621,7 @@ "h": 9, "w": 12, "x": 12, - "y": 41 + "y": 163 }, "hiddenSeries": false, "id": 71, @@ -10187,7 +10643,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10250,7 +10706,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10261,7 +10716,7 @@ "h": 9, "w": 12, "x": 0, - "y": 50 + "y": 172 }, "hiddenSeries": false, "id": 121, @@ -10284,7 +10739,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10383,7 +10838,16 @@ "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10400,7 +10864,47 @@ "legend": { "show": true }, - "links": [], + "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#B877D9", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + 
"legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10442,7 +10946,6 @@ "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10471,8 +10974,11 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10543,7 +11049,16 @@ "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10561,6 +11076,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#5794F2", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10602,7 +11157,6 @@ "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10630,8 +11184,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10732,7 +11289,16 @@ "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10750,6 +11316,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#FF9830", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": 
"short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10791,7 +11397,6 @@ "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10819,8 +11424,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10921,7 +11529,16 @@ "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10939,6 +11556,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#73BF69", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10976,12 +11633,12 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -11010,8 +11667,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -11024,11 +11684,13 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "50%", + "range": true, "refId": "A" }, { @@ -11106,12 +11768,6 @@ "uid": "$datasource" }, "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -11134,8 +11790,11 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -12218,6 +12877,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -12266,7 +12927,7 @@ "h": 8, "w": 12, "x": 0, 
- "y": 46 + "y": 47 }, "id": 191, "options": { @@ -12314,7 +12975,7 @@ "h": 8, "w": 12, "x": 12, - "y": 46 + "y": 47 }, "hiddenSeries": false, "id": 193, @@ -12334,7 +12995,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -12404,11 +13065,26 @@ "type": "prometheus", "uid": "$datasource" }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 54 + "y": 55 }, "heatmap": {}, "hideZeroBuckets": false, @@ -12418,6 +13094,48 @@ "show": false }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": -1, + "cellValues": { + "decimals": 2 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "min": 0, + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -12463,6 +13181,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -12507,7 +13227,7 @@ "h": 8, "w": 12, "x": 12, - "y": 54 + "y": 55 }, "id": 223, "options": { @@ -12757,6 +13477,6 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 150, + "version": 160, "weekStart": "" -} \ No newline at end of file +} -- cgit 1.5.1 From 6d9e2fd8782a6610d6daf499d141e67f476b2f8c Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 31 May 2023 15:13:48 -0700 Subject: Speed up background jobs populate_full_user_id_user_filters and populate_full_user_id_profiles (#15700) --- changelog.d/15700.misc | 1 + synapse/storage/databases/main/filtering.py | 2 +- synapse/storage/databases/main/profile.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15700.misc diff --git a/changelog.d/15700.misc b/changelog.d/15700.misc new file mode 100644 index 0000000000..e96bc681aa --- /dev/null +++ b/changelog.d/15700.misc @@ -0,0 +1 @@ +Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. \ No newline at end of file diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index da31eb44dc..f777777cbf 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -71,7 +71,7 @@ class FilteringWorkerStore(SQLBaseStore): SELECT user_id FROM user_filters WHERE user_id > ? ORDER BY user_id - LIMIT 1 OFFSET 50 + LIMIT 1 OFFSET 1000 """ txn.execute(sql, (lower_bound_id,)) res = txn.fetchone() diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 65c92bef51..21d54c7a7a 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -65,7 +65,7 @@ class ProfileWorkerStore(SQLBaseStore): SELECT user_id FROM profiles WHERE user_id > ? 
ORDER BY user_id
-                    LIMIT 1 OFFSET 50
+                    LIMIT 1 OFFSET 1000
                 """
                 txn.execute(sql, (lower_bound_id,))
                 res = txn.fetchone()
--
cgit 1.5.1


From a273561c2247ee433f97a31961a30ab00ab19574 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 1 Jun 2023 08:21:37 -0400
Subject: Add a note about deprecating /register with a user property. (#15703)

The "user" property that application services may provide to the /register
endpoint (instead of "username") was never specified. Deprecate this very
old fallback.
---
 changelog.d/15703.removal |  1 +
 docs/upgrade.md           | 11 +++++++++++
 2 files changed, 12 insertions(+)
 create mode 100644 changelog.d/15703.removal

diff --git a/changelog.d/15703.removal b/changelog.d/15703.removal
new file mode 100644
index 0000000000..95a2d8e484
--- /dev/null
+++ b/changelog.d/15703.removal
@@ -0,0 +1 @@
+Deprecate calling the `/register` endpoint with an unspecced `user` property for application services.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index af999dd91f..49ab00c057 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,17 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
+# Upgrading to v1.85.0
+
+## Application service registration with "user" property deprecation
+
+Application services should ensure they call the `/register` endpoint with a
+`username` property. The legacy `user` property is considered deprecated and
+should no longer be included.
+
+A future version of Synapse (v1.88.0 or later) will remove support for legacy
+application service login.
+
 # Upgrading to v1.84.0
 
 ## Deprecation of `worker_replication_*` configuration settings
--
cgit 1.5.1


From d1693f03626391097b59ea9568cd8a869ed89569 Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Thu, 1 Jun 2023 13:52:51 +0100
Subject: Implement stable support for MSC3882 to allow an existing
 device/session to generate a login token for use on a new device/session
 (#15388)

Implements stable support for MSC3882; this involves updating Synapse's
support to match what the MSC / the spec says. Continue to support the
unstable version to allow clients to transition.
---
 changelog.d/15388.feature                        |  1 +
 docs/usage/configuration/config_documentation.md | 65 ++++++++++++++--------
 synapse/config/auth.py                           | 10 ++++
 synapse/config/experimental.py                   | 13 +----
 synapse/rest/client/capabilities.py              |  3 +
 synapse/rest/client/login.py                     | 31 ++++++++---
 synapse/rest/client/login_token_request.py       | 47 +++++++++++-----
 synapse/rest/client/versions.py                  |  4 +-
 tests/config/test_oauth_delegation.py            |  4 +-
 tests/rest/client/test_capabilities.py           | 28 ++++++++++
 tests/rest/client/test_login.py                  | 23 ++++++++
 tests/rest/client/test_login_token_request.py    | 71 ++++++++++++++++++------
 12 files changed, 225 insertions(+), 75 deletions(-)
 create mode 100644 changelog.d/15388.feature

diff --git a/changelog.d/15388.feature b/changelog.d/15388.feature
new file mode 100644
index 0000000000..6cc55cafa2
--- /dev/null
+++ b/changelog.d/15388.feature
@@ -0,0 +1 @@
+Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session.
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 5ede6d0a82..0cf6e075ff 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2570,7 +2570,50 @@ Example configuration: ```yaml nonrefreshable_access_token_lifetime: 24h ``` +--- +### `ui_auth` + +The amount of time to allow a user-interactive authentication session to be active. +This defaults to 0, meaning the user is queried for their credentials +before every action, but this can be overridden to allow a single +validation to be re-used. This weakens the protections afforded by +the user-interactive authentication process, by allowing for multiple +(and potentially different) operations to use the same validation session. + +This is ignored for potentially "dangerous" operations (including +deactivating an account, modifying an account password, adding a 3PID, +and minting additional login tokens). + +Use the `session_timeout` sub-option here to change the time allowed for credential validation. + +Example configuration: +```yaml +ui_auth: + session_timeout: "15s" +``` +--- +### `login_via_existing_session` + +Matrix supports the ability of an existing session to mint a login token for +another client. + +Synapse disables this by default as it has security ramifications -- a malicious +client could use the mechanism to spawn more than one session. + +The duration of time the generated token is valid for can be configured with the +`token_timeout` sub-option. + +User-interactive authentication is required when this is enabled unless the +`require_ui_auth` sub-option is set to `False`. + +Example configuration: +```yaml +login_via_existing_session: + enabled: true + require_ui_auth: false + token_timeout: "5m" +``` --- ## Metrics Config options related to metrics. @@ -3415,28 +3458,6 @@ password_config: require_uppercase: true ``` --- -### `ui_auth` - -The amount of time to allow a user-interactive authentication session to be active. - -This defaults to 0, meaning the user is queried for their credentials -before every action, but this can be overridden to allow a single -validation to be re-used. This weakens the protections afforded by -the user-interactive authentication process, by allowing for multiple -(and potentially different) operations to use the same validation session. - -This is ignored for potentially "dangerous" operations (including -deactivating an account, modifying an account password, and -adding a 3PID). - -Use the `session_timeout` sub-option here to change the time allowed for credential validation. - -Example configuration: -```yaml -ui_auth: - session_timeout: "15s" -``` ---- ## Push Configuration settings related to push notifications diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 12e853980e..c7ab428f28 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -60,3 +60,13 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) + + # Logging in with an existing session. 
+ login_via_existing = config.get("login_via_existing_session", {}) + self.login_via_existing_enabled = login_via_existing.get("enabled", False) + self.login_via_existing_require_ui_auth = login_via_existing.get( + "require_ui_auth", True + ) + self.login_via_existing_token_timeout = self.parse_duration( + login_via_existing.get("token_timeout", "5m") + ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1d189b2e26..a9e002cf08 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -192,10 +192,10 @@ class MSC3861: ("captcha", "enable_registration_captcha"), ) - if root.experimental.msc3882_enabled: + if root.auth.login_via_existing_enabled: raise ConfigError( - "MSC3882 cannot be enabled when OAuth delegation is enabled", - ("experimental_features", "msc3882_enabled"), + "Login via existing session cannot be enabled when OAuth delegation is enabled", + ("login_via_existing_session", "enabled"), ) if root.registration.refresh_token_lifetime: @@ -319,13 +319,6 @@ class ExperimentalConfig(Config): # MSC3881: Remotely toggle push notifications for another client self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False) - # MSC3882: Allow an existing session to sign in a new session - self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False) - self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True) - self.msc3882_token_timeout = self.parse_duration( - experimental.get("msc3882_token_timeout", "5m") - ) - # MSC3874: Filtering /messages with rel_types / not_rel_types. self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 0dbf8f6818..3154b9f77e 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -65,6 +65,9 @@ class CapabilitiesRestServlet(RestServlet): "m.3pid_changes": { "enabled": self.config.registration.enable_3pid_changes }, + "m.get_login_token": { + "enabled": self.config.auth.login_via_existing_enabled, + }, } } diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index d4dc2462b9..6493b00bb8 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -104,6 +104,9 @@ class LoginRestServlet(RestServlet): and hs.config.experimental.msc3866.require_approval_for_new_accounts ) + # Whether get login token is enabled. + self._get_login_token_enabled = hs.config.auth.login_via_existing_enabled + self.auth = hs.get_auth() self.clock = hs.get_clock() @@ -142,6 +145,9 @@ class LoginRestServlet(RestServlet): # to SSO. flows.append({"type": LoginRestServlet.CAS_TYPE}) + # The login token flow requires m.login.token to be advertised. + support_login_token_flow = self._get_login_token_enabled + if self.cas_enabled or self.saml2_enabled or self.oidc_enabled: flows.append( { @@ -153,14 +159,23 @@ class LoginRestServlet(RestServlet): } ) - # While it's valid for us to advertise this login type generally, - # synapse currently only gives out these tokens as part of the - # SSO login flow. - # Generally we don't want to advertise login flows that clients - # don't know how to implement, since they (currently) will always - # fall back to the fallback API if they don't understand one of the - # login flow types returned. 
- flows.append({"type": LoginRestServlet.TOKEN_TYPE}) + # SSO requires a login token to be generated, so we need to advertise that flow + support_login_token_flow = True + + # While it's valid for us to advertise this login type generally, + # synapse currently only gives out these tokens as part of the + # SSO login flow or as part of login via an existing session. + # + # Generally we don't want to advertise login flows that clients + # don't know how to implement, since they (currently) will always + # fall back to the fallback API if they don't understand one of the + # login flow types returned. + if support_login_token_flow: + tokenTypeFlow: Dict[str, Any] = {"type": LoginRestServlet.TOKEN_TYPE} + # If the login token flow is enabled, advertise the get_login_token flag. + if self._get_login_token_enabled: + tokenTypeFlow["get_login_token"] = True + flows.append(tokenTypeFlow) flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types()) diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py index 43ea21d5e6..b1629f94a5 100644 --- a/synapse/rest/client/login_token_request.py +++ b/synapse/rest/client/login_token_request.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Tuple +from synapse.api.ratelimiting import Ratelimiter from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -33,7 +34,7 @@ class LoginTokenRequestServlet(RestServlet): Request: - POST /login/token HTTP/1.1 + POST /login/get_token HTTP/1.1 Content-Type: application/json {} @@ -43,30 +44,45 @@ class LoginTokenRequestServlet(RestServlet): HTTP/1.1 200 OK { "login_token": "ABDEFGH", - "expires_in": 3600, + "expires_in_ms": 3600000, } """ - PATTERNS = client_patterns( - "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True - ) + PATTERNS = [ + *client_patterns( + "/login/get_token$", releases=["v1"], v1=False, unstable=False + ), + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: + *client_patterns( + "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True + ), + ] def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self.server_name = hs.config.server.server_name + self._main_store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() - self.token_timeout = hs.config.experimental.msc3882_token_timeout - self.ui_auth = hs.config.experimental.msc3882_ui_auth + self.token_timeout = hs.config.auth.login_via_existing_token_timeout + self._require_ui_auth = hs.config.auth.login_via_existing_require_ui_auth + + # Ratelimit aggressively to a maximum of 1 request per minute. + # + # This endpoint can be used to spawn additional sessions and could be + # abused by a malicious client to create many sessions.
+ self._ratelimiter = Ratelimiter( + store=self._main_store, + clock=hs.get_clock(), + rate_hz=1 / 60, + burst_count=1, + ) @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) body = parse_json_object_from_request(request) - if self.ui_auth: + if self._require_ui_auth: await self.auth_handler.validate_user_via_ui_auth( requester, request, @@ -75,9 +91,12 @@ class LoginTokenRequestServlet(RestServlet): can_skip_ui_auth=False, # Don't allow skipping of UI auth ) + # Ensure that this endpoint isn't being used too often. (Ensure this is + # done *after* UI auth.) + await self._ratelimiter.ratelimit(None, requester.user.to_string().lower()) + login_token = await self.auth_handler.create_login_token_for_user_id( user_id=requester.user.to_string(), - auth_provider_id="org.matrix.msc3882.login_token_request", duration_ms=self.token_timeout, ) @@ -85,11 +104,13 @@ class LoginTokenRequestServlet(RestServlet): 200, { "login_token": login_token, + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: "expires_in": self.token_timeout // 1000, + "expires_in_ms": self.token_timeout, }, ) def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc3882_enabled: + if hs.config.auth.login_via_existing_enabled: LoginTokenRequestServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 32df054f56..547bf34df1 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -113,8 +113,8 @@ class VersionsRestServlet(RestServlet): "fi.mau.msc2815": self.config.experimental.msc2815_enabled, # Adds a ping endpoint for appservices to check HS->AS connection "fi.mau.msc2659.stable": True, # TODO: remove when "v1.7" is added above - # Adds support for login token requests as per MSC3882 - "org.matrix.msc3882": self.config.experimental.msc3882_enabled, + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: + "org.matrix.msc3882": self.config.auth.login_via_existing_enabled, # Adds support for remotely enabling/disabling pushers, as per MSC3881 "org.matrix.msc3881": self.config.experimental.msc3881_enabled, # Adds support for filtering /messages by event relation. 
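Taken together, the hunks above give clients a two-step flow: an existing authenticated session calls the new stable `POST /_matrix/client/v1/login/get_token` endpoint (rate-limited, and gated behind user-interactive auth unless `require_ui_auth` is disabled), and the new device then logs in with the returned token via the standard `m.login.token` login type. A hedged sketch of that client-side flow, with a placeholder homeserver URL and access token:

```python
import requests

# Placeholders for illustration only.
HOMESERVER = "https://matrix.example.org"
EXISTING_ACCESS_TOKEN = "syt_..."  # token of the already-logged-in session

# Step 1: the existing session mints a short-lived login token. If
# require_ui_auth is on, the server first replies 401 with UIA flows.
resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v1/login/get_token",
    headers={"Authorization": f"Bearer {EXISTING_ACCESS_TOKEN}"},
    json={},
)
resp.raise_for_status()
body = resp.json()
login_token = body["login_token"]
print("login token valid for", body["expires_in_ms"], "ms")

# Step 2: the new device exchanges the login token for its own session
# using the spec's m.login.token login type.
login = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/login",
    json={"type": "m.login.token", "token": login_token},
)
login.raise_for_status()
print("new session for", login.json()["user_id"])
```

Clients can discover whether this is available from the `get_login_token` flag on the `m.login.token` flow in `GET /login`, or from the `m.get_login_token` capability added above.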
diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 2ead721b00..f57c813a58 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -228,8 +228,8 @@ class MSC3861OAuthDelegation(TestCase): with self.assertRaises(ConfigError): self.parse_config() - def test_msc3882_auth_cannot_be_enabled(self) -> None: - self.config_dict["experimental_features"]["msc3882_enabled"] = True + def test_login_via_existing_session_cannot_be_enabled(self) -> None: + self.config_dict["login_via_existing_session"] = {"enabled": True} with self.assertRaises(ConfigError): self.parse_config() diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index c16e8d43f4..cf23430f6a 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -186,3 +186,31 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertGreater(len(details["support"]), 0) for room_version in details["support"]: self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version)) + + def test_get_get_token_login_fields_when_disabled(self) -> None: + """By default login via an existing session is disabled.""" + access_token = self.get_success( + self.auth_handler.create_access_token_for_user_id( + self.user, device_id=None, valid_until_ms=None + ) + ) + + channel = self.make_request("GET", self.url, access_token=access_token) + capabilities = channel.json_body["capabilities"] + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.get_login_token"]["enabled"]) + + @override_config({"login_via_existing_session": {"enabled": True}}) + def test_get_get_token_login_fields_when_enabled(self) -> None: + access_token = self.get_success( + self.auth_handler.create_access_token_for_user_id( + self.user, device_id=None, valid_until_ms=None + ) + ) + + channel = self.make_request("GET", self.url, access_token=access_token) + capabilities = channel.json_body["capabilities"] + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertTrue(capabilities["m.get_login_token"]["enabled"]) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index dc32982e22..f3c3bc69a9 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -446,6 +446,29 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"] ) + def test_get_login_flows_with_login_via_existing_disabled(self) -> None: + """GET /login should return m.login.token without get_login_token""" + channel = self.make_request("GET", "/_matrix/client/r0/login") + self.assertEqual(channel.code, 200, channel.result) + + flows = {flow["type"]: flow for flow in channel.json_body["flows"]} + self.assertNotIn("m.login.token", flows) + + @override_config({"login_via_existing_session": {"enabled": True}}) + def test_get_login_flows_with_login_via_existing_enabled(self) -> None: + """GET /login should return m.login.token with get_login_token true""" + channel = self.make_request("GET", "/_matrix/client/r0/login") + self.assertEqual(channel.code, 200, channel.result) + + self.assertCountEqual( + channel.json_body["flows"], + [ + {"type": "m.login.token", "get_login_token": True}, + {"type": "m.login.password"}, + {"type": "m.login.application_service"}, + ], + ) + @skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC") class MultiSSOTestCase(unittest.HomeserverTestCase): diff --git 
a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py index b8187db982..f05e619aa8 100644 --- a/tests/rest/client/test_login_token_request.py +++ b/tests/rest/client/test_login_token_request.py @@ -15,14 +15,14 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.rest import admin -from synapse.rest.client import login, login_token_request +from synapse.rest.client import login, login_token_request, versions from synapse.server import HomeServer from synapse.util import Clock from tests import unittest from tests.unittest import override_config -endpoint = "/_matrix/client/unstable/org.matrix.msc3882/login/token" +GET_TOKEN_ENDPOINT = "/_matrix/client/v1/login/get_token" class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): @@ -30,6 +30,7 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): login.register_servlets, admin.register_servlets, login_token_request.register_servlets, + versions.register_servlets, # TODO: remove once unstable revision 0 support is removed ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: @@ -46,26 +47,26 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.password = "password" def test_disabled(self) -> None: - channel = self.make_request("POST", endpoint, {}, access_token=None) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None) self.assertEqual(channel.code, 404) self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 404) - @override_config({"experimental_features": {"msc3882_enabled": True}}) + @override_config({"login_via_existing_session": {"enabled": True}}) def test_require_auth(self) -> None: - channel = self.make_request("POST", endpoint, {}, access_token=None) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None) self.assertEqual(channel.code, 401) - @override_config({"experimental_features": {"msc3882_enabled": True}}) + @override_config({"login_via_existing_session": {"enabled": True}}) def test_uia_on(self) -> None: user_id = self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 401) self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) @@ -80,9 +81,9 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): }, } - channel = self.make_request("POST", endpoint, uia, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, uia, access_token=token) self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body["expires_in"], 300) + self.assertEqual(channel.json_body["expires_in_ms"], 300000) login_token = channel.json_body["login_token"] @@ -95,15 +96,15 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["user_id"], user_id) @override_config( - {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}} + {"login_via_existing_session": {"enabled": True, "require_ui_auth": False}} ) def test_uia_off(self) -> None: user_id = self.register_user(self.user, self.password) token = 
self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body["expires_in"], 300) + self.assertEqual(channel.json_body["expires_in_ms"], 300000) login_token = channel.json_body["login_token"] @@ -117,10 +118,10 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): @override_config( { - "experimental_features": { - "msc3882_enabled": True, - "msc3882_ui_auth": False, - "msc3882_token_timeout": "15s", + "login_via_existing_session": { + "enabled": True, + "require_ui_auth": False, + "token_timeout": "15s", } } ) @@ -128,6 +129,40 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["expires_in_ms"], 15000) + + @override_config( + { + "login_via_existing_session": { + "enabled": True, + "require_ui_auth": False, + "token_timeout": "15s", + } + } + ) + def test_unstable_support(self) -> None: + # TODO: remove once support for unstable MSC3882 is no longer needed + + # check feature is advertised in versions response: + channel = self.make_request( + "GET", "/_matrix/client/versions", {}, access_token=None + ) + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body["unstable_features"]["org.matrix.msc3882"], True + ) + + self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + # check feature is available via the unstable endpoint and returns an expires_in value in seconds + channel = self.make_request( + "POST", + "/_matrix/client/unstable/org.matrix.msc3882/login/token", + {}, + access_token=token, + ) self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["expires_in"], 15) -- cgit 1.5.1 From 5ed0e8c61f6b46289fdc5609e8e573b67c2c1982 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Jun 2023 14:25:20 +0100 Subject: Cache requests for user's devices from federation (#15675) This should mitigate the issue where lots of different servers request the same user's devices all at once. --- changelog.d/15675.misc | 1 + synapse/storage/databases/main/devices.py | 4 ++ synapse/storage/databases/main/end_to_end_keys.py | 67 ++++++++++++++++++++++- 3 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15675.misc diff --git a/changelog.d/15675.misc b/changelog.d/15675.misc new file mode 100644 index 0000000000..05538fdbef --- /dev/null +++ b/changelog.d/15675.misc @@ -0,0 +1 @@ +Cache requests for user's devices over federation.
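The interesting part of the hunks below is how the new cache stays correct without locking: the per-user device list lives in a `@cached` inner function, a cheap "any newer writes for this user?" probe against `device_lists_stream` decides whether a cached entry can be trusted, and the entry is invalidated both on the local write path and when a device-list change arrives over replication. A condensed sketch of that shape, using hypothetical store helpers rather than Synapse's actual API:

```python
from typing import Dict, List


class DeviceKeyCache:
    """Illustrative check-then-trust cache: cached entries are only used
    after confirming no newer writes exist for that user."""

    def __init__(self) -> None:
        self._cache: Dict[str, List[dict]] = {}

    async def get_devices(self, user_id: str, store) -> List[dict]:
        cached = self._cache.get(user_id)
        if cached is not None:
            # Cheap staleness probe (assumed helper): are there stream rows
            # for this user newer than the point we cached at?
            if not await store.has_newer_device_changes(user_id):
                return cached
            # Possibly stale: drop the entry and re-query.
            self._cache.pop(user_id, None)

        devices = await store.fetch_all_devices(user_id)  # assumed helper
        self._cache[user_id] = devices
        return devices

    def invalidate(self, user_id: str) -> None:
        # Called from the local write path (txn.call_after) and from
        # process_replication_rows, mirroring the hunks below.
        self._cache.pop(user_id, None)
```

The probe is what makes this safe on a multi-worker deployment: even if an invalidation from another worker is still in flight, the extra `device_lists_stream` check catches the lag.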
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index a67fdb3c22..f677d048aa 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1941,6 +1941,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): user_id, stream_ids[-1], ) + txn.call_after( + self._get_e2e_device_keys_for_federation_query_inner.invalidate, + (user_id,), + ) min_stream_id = stream_ids[0] diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 4bc391f213..91ae9c457d 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -16,6 +16,7 @@ import abc from typing import ( TYPE_CHECKING, + Any, Collection, Dict, Iterable, @@ -39,6 +40,7 @@ from synapse.appservice import ( TransactionUnusedFallbackKeys, ) from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.replication.tcp.streams._base import DeviceListsStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, @@ -104,6 +106,23 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker self.hs.config.federation.allow_device_name_lookup_over_federation ) + def process_replication_rows( + self, + stream_name: str, + instance_name: str, + token: int, + rows: Iterable[Any], + ) -> None: + if stream_name == DeviceListsStream.NAME: + for row in rows: + assert isinstance(row, DeviceListsStream.DeviceListsStreamRow) + if row.entity.startswith("@"): + self._get_e2e_device_keys_for_federation_query_inner.invalidate( + (row.entity,) + ) + + super().process_replication_rows(stream_name, instance_name, token, rows) + async def get_e2e_device_keys_for_federation_query( self, user_id: str ) -> Tuple[int, List[JsonDict]]: @@ -114,6 +133,50 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ now_stream_id = self.get_device_stream_token() + # We need to be careful with the caching here, as we need to always + # return *all* persisted devices, however there may be a lag between a + # new device being persisted and the cache being invalidated. + cached_results = ( + self._get_e2e_device_keys_for_federation_query_inner.cache.get_immediate( + user_id, None + ) + ) + if cached_results is not None: + # Check that there have been no new devices added by another worker + # after the cache. This should be quick as there should be few rows + # with a higher stream ordering. + # + # Note that we invalidate based on the device stream, so we only + # have to check for potential invalidations after the + # `now_stream_id`. + sql = """ + SELECT user_id FROM device_lists_stream + WHERE stream_id >= ? AND user_id = ? + """ + rows = await self.db_pool.execute( + "get_e2e_device_keys_for_federation_query_check", + None, + sql, + now_stream_id, + user_id, + ) + if not rows: + # No new rows, so cache is still valid. + return now_stream_id, cached_results + + # There has, so let's invalidate the cache and run the query. 
+ self._get_e2e_device_keys_for_federation_query_inner.invalidate((user_id,)) + + results = await self._get_e2e_device_keys_for_federation_query_inner(user_id) + + return now_stream_id, results + + @cached(iterable=True) + async def _get_e2e_device_keys_for_federation_query_inner( + self, user_id: str + ) -> List[JsonDict]: + """Get all devices (with any device keys) for a user""" + devices = await self.get_e2e_device_keys_and_signatures([(user_id, None)]) if devices: @@ -134,9 +197,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker results.append(result) - return now_stream_id, results + return results - return now_stream_id, [] + return [] @trace @cancellable -- cgit 1.5.1 From 4c0bffaca5ded573cc26d99bd5831f136f8acacc Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Thu, 1 Jun 2023 09:16:35 -0700 Subject: 1.85.0rc2 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/15693.bugfix | 1 - changelog.d/15700.misc | 1 - changelog.d/15703.removal | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 28 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/15693.bugfix delete mode 100644 changelog.d/15700.misc delete mode 100644 changelog.d/15703.removal diff --git a/CHANGES.md b/CHANGES.md index 14aac9f14e..f0885a2f1e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.85.0rc2 (2023-06-01) +============================== + +Bugfixes +-------- + +- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693)) + + +Deprecations and Removals +------------------------- + +- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703)) + + +Internal Changes +---------------- + +- Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700)) + + Synapse 1.85.0rc1 (2023-05-30) ============================== diff --git a/changelog.d/15693.bugfix b/changelog.d/15693.bugfix deleted file mode 100644 index d0325de007..0000000000 --- a/changelog.d/15693.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. \ No newline at end of file diff --git a/changelog.d/15700.misc b/changelog.d/15700.misc deleted file mode 100644 index e96bc681aa..0000000000 --- a/changelog.d/15700.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. \ No newline at end of file diff --git a/changelog.d/15703.removal b/changelog.d/15703.removal deleted file mode 100644 index 95a2d8e484..0000000000 --- a/changelog.d/15703.removal +++ /dev/null @@ -1 +0,0 @@ -Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. diff --git a/debian/changelog b/debian/changelog index 2d88cd9d29..ae348ce4df 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium + + * New Synapse release 1.85.0rc2. + + -- Synapse Packaging team Thu, 01 Jun 2023 09:16:18 -0700 + matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium * New Synapse release 1.85.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index 7227bc7523..4ed4214f34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0rc1" +version = "1.85.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From 30a5076da8ad776c150ad2745b5f34b4446012e0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 1 Jun 2023 21:27:18 -0500 Subject: Log when events are (unexpectedly) filtered out of responses in tests (#14213) See https://github.com/matrix-org/synapse/pull/14095#discussion_r990335492 This is useful because when we see that a relevant event is an `outlier` or `soft-failed`, that's a good indicator explaining why it's unexpectedly not showing up. `filter_events_for_client` is used in `/sync`, `/messages`, `/context` which are all common end-to-end assertion touch points (also notifications, relations). --- changelog.d/14213.misc | 1 + docker/README.md | 3 ++- docker/conf/log.config | 30 ++++++++++++++++++++++++------ docker/configure_workers_and_start.py | 3 +++ scripts-dev/complement.sh | 4 ++++ synapse/visibility.py | 14 +++++++------- tests/test_utils/logging_setup.py | 12 ++++++++++++ 7 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 changelog.d/14213.misc diff --git a/changelog.d/14213.misc b/changelog.d/14213.misc new file mode 100644 index 0000000000..b0689f3d15 --- /dev/null +++ b/changelog.d/14213.misc @@ -0,0 +1 @@ +Log when events are (maybe unexpectedly) filtered out of responses in tests. diff --git a/docker/README.md b/docker/README.md index eda3221c23..08372e95c6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -73,7 +73,8 @@ The following environment variables are supported in `generate` mode: will log sensitive information such as access tokens. This should not be needed unless you are a developer attempting to debug something particularly tricky. - +* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful + for testing. ## Postgres diff --git a/docker/conf/log.config b/docker/conf/log.config index 90b5179838..5772321202 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -49,17 +49,35 @@ handlers: class: logging.StreamHandler formatter: precise -{% if not SYNAPSE_LOG_SENSITIVE %} -{# - If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO - so that DEBUG entries (containing sensitive information) are not emitted. -#} loggers: + # This is just here so we can leave `loggers` in the config regardless of whether + # we configure other loggers below (avoid empty yaml dict error). + _placeholder: + level: "INFO" + + {% if not SYNAPSE_LOG_SENSITIVE %} + {# + If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO + so that DEBUG entries (containing sensitive information) are not emitted. + #} synapse.storage.SQL: # beware: increasing this to DEBUG will make synapse log sensitive # information such as access tokens. level: INFO - {% endif %} + {% endif %} + + {% if SYNAPSE_LOG_TESTING %} + {# + If Synapse is under test, log a few more useful things for a developer + attempting to debug something particularly tricky. + + With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe + unexpectedly) filtered out of responses in tests. It's just nice to be able to + look at the CI log and figure out why an event isn't being returned.
+ #} + synapse.visibility.filtered_event_debug: + level: DEBUG + {% endif %} root: level: {{ SYNAPSE_LOG_LEVEL or "INFO" }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 79b5b87397..87a740e3d4 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -40,6 +40,8 @@ # log level. INFO is the default. # * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged, # regardless of the SYNAPSE_LOG_LEVEL setting. +# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful +# for testing. # # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined # in the project's README), this script may be run multiple times, and functionality should @@ -947,6 +949,7 @@ def generate_worker_log_config( extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get( "SYNAPSE_LOG_SENSITIVE" ) + extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING") # Render and write the file log_config_filepath = f"/conf/workers/{worker_name}.log.config" diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index cba2799f15..131f26234e 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -269,6 +269,10 @@ if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then export PASS_SYNAPSE_LOG_SENSITIVE=1 fi +# Log a few more useful things for a developer attempting to debug something +# particularly tricky. +export PASS_SYNAPSE_LOG_TESTING=1 + # Run the tests! echo "Images built; running complement" cd "$COMPLEMENT_DIR" diff --git a/synapse/visibility.py b/synapse/visibility.py index 468e22f8f6..fc71dc92a4 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -41,7 +41,7 @@ from synapse.types.state import StateFilter from synapse.util import Clock logger = logging.getLogger(__name__) - +filtered_event_logger = logging.getLogger("synapse.visibility.filtered_event_debug") VISIBILITY_PRIORITY = ( HistoryVisibility.WORLD_READABLE, @@ -97,8 +97,8 @@ async def filter_events_for_client( events_before_filtering = events events = [e for e in events if not e.internal_metadata.is_soft_failed()] if len(events_before_filtering) != len(events): - if logger.isEnabledFor(logging.DEBUG): - logger.debug( + if filtered_event_logger.isEnabledFor(logging.DEBUG): + filtered_event_logger.debug( "filter_events_for_client: Filtered out soft-failed events: Before=%s, After=%s", [event.event_id for event in events_before_filtering], [event.event_id for event in events], @@ -319,7 +319,7 @@ def _check_client_allowed_to_see_event( _check_filter_send_to_client(event, clock, retention_policy, sender_ignored) == _CheckFilter.DENIED ): - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because `_check_filter_send_to_client` returned `_CheckFilter.DENIED`", event.event_id, ) @@ -341,7 +341,7 @@ def _check_client_allowed_to_see_event( ) return event - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because it's an outlier", event.event_id, ) @@ -367,7 +367,7 @@ def _check_client_allowed_to_see_event( membership_result = _check_membership(user_id, event, visibility, state, is_peeking) if not membership_result.allowed: - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because the user can't see the event because of their membership, membership_result.allowed=%s 
membership_result.joined=%s", event.event_id, membership_result.allowed, @@ -378,7 +378,7 @@ def _check_client_allowed_to_see_event( # If the sender has been erased and the user was not joined at the time, we # must only return the redacted form. if sender_erased and not membership_result.joined: - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Returning pruned event because `sender_erased` and the user was not joined at the time", event.event_id, ) diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index c37f205ed0..199bb06a81 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -53,4 +53,16 @@ def setup_logging() -> None: log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR") root_logger.setLevel(log_level) + # In order to not add noise by default (since we only log ERROR messages for trial + # tests as configured above), we only enable this for developers for looking for + # more INFO or DEBUG. + if root_logger.isEnabledFor(logging.INFO): + # Log when events are (maybe unexpectedly) filtered out of responses in tests. It's + # just nice to be able to look at the CI log and figure out why an event isn't being + # returned. + logging.getLogger("synapse.visibility.filtered_event_debug").setLevel( + logging.DEBUG + ) + + # Blow away the pyo3-log cache so that it reloads the configuration. reset_logging_config() -- cgit 1.5.1 From e0f2429d137c74059f5b7f151297e28dbfd82d48 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Fri, 2 Jun 2023 15:13:50 +0200 Subject: Add a catch-all * to the supported relation types when redacting (#15705) This is an update to MSC3912 implementation --- changelog.d/15705.feature | 1 + synapse/handlers/relations.py | 16 +++-- synapse/storage/databases/main/relations.py | 30 ++++++++ tests/rest/client/test_redactions.py | 104 +++++++++++++++++++++++++++- 4 files changed, 143 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15705.feature diff --git a/changelog.d/15705.feature b/changelog.d/15705.feature new file mode 100644 index 0000000000..e3cbb5a12e --- /dev/null +++ b/changelog.d/15705.feature @@ -0,0 +1 @@ +Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 4824635162..db97f7aede 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -205,16 +205,22 @@ class RelationsHandler: event_id: The event IDs to look and redact relations of. initial_redaction_event: The redaction for the event referred to by event_id. - relation_types: The types of relations to look for. + relation_types: The types of relations to look for. If "*" is in the list, + all related events will be redacted regardless of the type. 
Raises: ShadowBanError if the requester is shadow-banned """ - related_event_ids = ( - await self._main_store.get_all_relations_for_event_with_types( - event_id, relation_types + if "*" in relation_types: + related_event_ids = await self._main_store.get_all_relations_for_event( + event_id + ) + else: + related_event_ids = ( + await self._main_store.get_all_relations_for_event_with_types( + event_id, relation_types + ) ) - ) for related_event_id in related_event_ids: try: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 4a6c6c724d..96908f14ba 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -365,6 +365,36 @@ class RelationsWorkerStore(SQLBaseStore): func=get_all_relation_ids_for_event_with_types_txn, ) + async def get_all_relations_for_event( + self, + event_id: str, + ) -> List[str]: + """Get the event IDs of all events that have a relation to the given event. + + Args: + event_id: The event for which to look for related events. + + Returns: + A list of the IDs of the events that relate to the given event. + """ + + def get_all_relation_ids_for_event_txn( + txn: LoggingTransaction, + ) -> List[str]: + rows = self.db_pool.simple_select_list_txn( + txn=txn, + table="event_relations", + keyvalues={"relates_to_id": event_id}, + retcols=["event_id"], + ) + + return [row["event_id"] for row in rows] + + return await self.db_pool.runInteraction( + desc="get_all_relation_ids_for_event", + func=get_all_relation_ids_for_event_txn, + ) + async def event_includes_relation(self, event_id: str) -> bool: """Check if the given event relates to another event. diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 84a60c0b07..b43e95292c 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -217,9 +217,9 @@ class RedactionsTestCase(HomeserverTestCase): self._redact_event(self.mod_access_token, self.room_id, msg_id) @override_config({"experimental_features": {"msc3912_enabled": True}}) - def test_redact_relations(self) -> None: - """Tests that we can redact the relations of an event at the same time as the - event itself. + def test_redact_relations_with_types(self) -> None: + """Tests that we can redact the relations of an event of specific types + at the same time as the event itself. """ # Send a root event. res = self.helper.send_event( @@ -317,6 +317,104 @@ class RedactionsTestCase(HomeserverTestCase): ) self.assertNotIn("redacted_because", event_dict, event_dict) + @override_config({"experimental_features": {"msc3912_enabled": True}}) + def test_redact_all_relations(self) -> None: + """Tests that we can redact all the relations of an event at the same time as the + event itself. + """ + # Send a root event. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={"msgtype": "m.text", "body": "hello"}, + tok=self.mod_access_token, + ) + root_event_id = res["event_id"] + + # Send an edit to this root event. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "body": " * hello world", + "m.new_content": { + "body": "hello world", + "msgtype": "m.text", + }, + "m.relates_to": { + "event_id": root_event_id, + "rel_type": RelationTypes.REPLACE, + }, + "msgtype": "m.text", + }, + tok=self.mod_access_token, + ) + edit_event_id = res["event_id"] + + # Also send a threaded message whose root is the same as the edit's. 
+ res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "message 1", + "m.relates_to": { + "event_id": root_event_id, + "rel_type": RelationTypes.THREAD, + }, + }, + tok=self.mod_access_token, + ) + threaded_event_id = res["event_id"] + + # Also send a reaction, again with the same root. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Reaction, + content={ + "m.relates_to": { + "rel_type": RelationTypes.ANNOTATION, + "event_id": root_event_id, + "key": "👍", + } + }, + tok=self.mod_access_token, + ) + reaction_event_id = res["event_id"] + + # Redact the root event, specifying that we also want to delete all events that + # relate to it. + self._redact_event( + self.mod_access_token, + self.room_id, + root_event_id, + with_relations=["*"], + ) + + # Check that the root event got redacted. + event_dict = self.helper.get_event( + self.room_id, root_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the edit got redacted. + event_dict = self.helper.get_event( + self.room_id, edit_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the threaded message got redacted. + event_dict = self.helper.get_event( + self.room_id, threaded_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the reaction got redacted. + event_dict = self.helper.get_event( + self.room_id, reaction_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + @override_config({"experimental_features": {"msc3912_enabled": True}}) def test_redact_relations_no_perms(self) -> None: """Tests that, when redacting a message along with its relations, if not all -- cgit 1.5.1 From d0c4257f14addbf0c9072c2e34ae1c8294716ed5 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 2 Jun 2023 17:24:13 -0700 Subject: `N + 3`: Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters` (#15649) --- changelog.d/15649.misc | 1 + synapse/api/filtering.py | 4 +- synapse/handlers/account_validity.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/profile.py | 26 ++---- synapse/handlers/register.py | 2 +- synapse/module_api/__init__.py | 4 +- synapse/push/mailer.py | 2 +- synapse/rest/client/filter.py | 2 +- synapse/rest/client/sync.py | 2 +- synapse/storage/databases/main/filtering.py | 12 +-- synapse/storage/databases/main/profile.py | 12 +-- synapse/storage/schema/__init__.py | 5 +- .../delta/78/01_validate_and_update_profiles.py | 92 +++++++++++++++++++++ .../78/02_validate_and_update_user_filters.py | 95 ++++++++++++++++++++++ tests/api/test_filtering.py | 25 ++---- tests/handlers/test_profile.py | 28 ++----- tests/module_api/test_api.py | 6 +- tests/rest/client/test_filter.py | 4 +- tests/storage/test_profile.py | 17 ++-- 22 files changed, 252 insertions(+), 95 deletions(-) create mode 100644 changelog.d/15649.misc create mode 100644 synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py create mode 100644 synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py diff --git a/changelog.d/15649.misc b/changelog.d/15649.misc new file mode 100644 index 0000000000..fca38abe0f --- /dev/null +++ b/changelog.d/15649.misc @@ -0,0 +1 @@ +Read from column `full_user_id` rather than `user_id` of 
tables `profiles` and `user_filters`. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 82aeef8d19..0995ecbe83 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -152,9 +152,9 @@ class Filtering: self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {}) async def get_user_filter( - self, user_localpart: str, filter_id: Union[int, str] + self, user_id: UserID, filter_id: Union[int, str] ) -> "FilterCollection": - result = await self.store.get_user_filter(user_localpart, filter_id) + result = await self.store.get_user_filter(user_id, filter_id) return FilterCollection(self._hs, result) def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]: diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 4aa4ebf7e4..f1a7a05df6 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -164,7 +164,7 @@ class AccountValidityHandler: try: user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id).localpart + UserID.from_string(user_id) ) if user_display_name is None: user_display_name = user_id diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index b06f25b03c..119c7f8384 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -89,7 +89,7 @@ class AdminHandler: } # Add additional user metadata - profile = await self._store.get_profileinfo(user.localpart) + profile = await self._store.get_profileinfo(user) threepids = await self._store.user_get_threepids(user.to_string()) external_ids = [ ({"auth_provider": auth_provider, "external_id": external_id}) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4f986d90cb..59ecafa6a0 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1759,7 +1759,7 @@ class AuthHandler: return user_profile_data = await self.store.get_profileinfo( - UserID.from_string(registered_user_id).localpart + UserID.from_string(registered_user_id) ) # Store any extra attributes which will be passed in the login response. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index f299b89a1b..67adeae6a7 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -297,5 +297,5 @@ class DeactivateAccountHandler: # Add the user to the directory, if necessary. Note that # this must be done after the user is re-activated, because # deactivated users are excluded from the user directory. 
- profile = await self.store.get_profileinfo(user.localpart) + profile = await self.store.get_profileinfo(user) await self.user_directory_handler.handle_local_profile_change(user_id, profile) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a9160c87e3..a7f8c5e636 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -67,7 +67,7 @@ class ProfileHandler: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): - profileinfo = await self.store.get_profileinfo(target_user.localpart) + profileinfo = await self.store.get_profileinfo(target_user) if profileinfo.display_name is None: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -99,9 +99,7 @@ class ProfileHandler: async def get_displayname(self, target_user: UserID) -> Optional[str]: if self.hs.is_mine(target_user): try: - displayname = await self.store.get_profile_displayname( - target_user.localpart - ) + displayname = await self.store.get_profile_displayname(target_user) except StoreError as e: if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -147,7 +145,7 @@ class ProfileHandler: raise AuthError(400, "Cannot set another user's displayname") if not by_admin and not self.hs.config.registration.enable_set_displayname: - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) if profile.display_name: raise SynapseError( 400, @@ -180,7 +178,7 @@ class ProfileHandler: await self.store.set_profile_displayname(target_user, displayname_to_set) - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) @@ -194,9 +192,7 @@ class ProfileHandler: async def get_avatar_url(self, target_user: UserID) -> Optional[str]: if self.hs.is_mine(target_user): try: - avatar_url = await self.store.get_profile_avatar_url( - target_user.localpart - ) + avatar_url = await self.store.get_profile_avatar_url(target_user) except StoreError as e: if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -241,7 +237,7 @@ class ProfileHandler: raise AuthError(400, "Cannot set another user's avatar_url") if not by_admin and not self.hs.config.registration.enable_set_avatar_url: - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) if profile.avatar_url: raise SynapseError( 400, "Changing avatar is disabled on this server", Codes.FORBIDDEN @@ -272,7 +268,7 @@ class ProfileHandler: await self.store.set_profile_avatar_url(target_user, avatar_url_to_set) - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) @@ -369,14 +365,10 @@ class ProfileHandler: response = {} try: if just_field is None or just_field == "displayname": - response["displayname"] = await self.store.get_profile_displayname( - user.localpart - ) + response["displayname"] = await self.store.get_profile_displayname(user) if just_field is None or just_field == "avatar_url": - response["avatar_url"] = await self.store.get_profile_avatar_url( - user.localpart - ) + response["avatar_url"] = await self.store.get_profile_avatar_url(user) except StoreError as e: if e.code == 404: raise SynapseError(404, 
"Profile was not found", Codes.NOT_FOUND) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c80946c2e9..a2d3f03061 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -315,7 +315,7 @@ class RegistrationHandler: approved=approved, ) - profile = await self.store.get_profileinfo(localpart) + profile = await self.store.get_profileinfo(user) await self.user_directory_handler.handle_local_profile_change( user_id, profile ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a8d6224a45..84b2aef620 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -655,7 +655,9 @@ class ModuleApi: Returns: The profile information (i.e. display name and avatar URL). """ - return await self._store.get_profileinfo(localpart) + server_name = self._hs.hostname + user_id = UserID.from_string(f"@{localpart}:{server_name}") + return await self._store.get_profileinfo(user_id) async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]: """Look up the threepids (email addresses and phone numbers) associated with the diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 491a09b71d..79e0627b6a 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -247,7 +247,7 @@ class Mailer: try: user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id).localpart + UserID.from_string(user_id) ) if user_display_name is None: user_display_name = user_id diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index 04561f36d7..5da1e511a2 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -58,7 +58,7 @@ class GetFilterRestServlet(RestServlet): try: filter_collection = await self.filtering.get_user_filter( - user_localpart=target_user.localpart, filter_id=filter_id_int + user_id=target_user, filter_id=filter_id_int ) except StoreError as e: if e.code != 404: diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 03b0578945..d7854ed4fd 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -178,7 +178,7 @@ class SyncRestServlet(RestServlet): else: try: filter_collection = await self.filtering.get_user_filter( - user.localpart, filter_id + user, filter_id ) except StoreError as err: if err.code != 404: diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index f777777cbf..fff417f9e3 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -145,7 +145,7 @@ class FilteringWorkerStore(SQLBaseStore): @cached(num_args=2) async def get_user_filter( - self, user_localpart: str, filter_id: Union[int, str] + self, user_id: UserID, filter_id: Union[int, str] ) -> JsonDict: # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail # with a coherent error message rather than 500 M_UNKNOWN. @@ -156,7 +156,7 @@ class FilteringWorkerStore(SQLBaseStore): def_json = await self.db_pool.simple_select_one_onecol( table="user_filters", - keyvalues={"user_id": user_localpart, "filter_id": filter_id}, + keyvalues={"full_user_id": user_id.to_string(), "filter_id": filter_id}, retcol="filter_json", allow_none=False, desc="get_user_filter", @@ -172,15 +172,15 @@ class FilteringWorkerStore(SQLBaseStore): def _do_txn(txn: LoggingTransaction) -> int: sql = ( "SELECT filter_id FROM user_filters " - "WHERE user_id = ? AND filter_json = ?" + "WHERE full_user_id = ? 
AND filter_json = ?" ) - txn.execute(sql, (user_id.localpart, bytearray(def_json))) + txn.execute(sql, (user_id.to_string(), bytearray(def_json))) filter_id_response = txn.fetchone() if filter_id_response is not None: return filter_id_response[0] - sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?" - txn.execute(sql, (user_id.localpart,)) + sql = "SELECT MAX(filter_id) FROM user_filters WHERE full_user_id = ?" + txn.execute(sql, (user_id.to_string(),)) max_id = cast(Tuple[Optional[int]], txn.fetchone())[0] if max_id is None: filter_id = 0 diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 21d54c7a7a..3ba9cc8853 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -137,11 +137,11 @@ class ProfileWorkerStore(SQLBaseStore): return 50 - async def get_profileinfo(self, user_localpart: str) -> ProfileInfo: + async def get_profileinfo(self, user_id: UserID) -> ProfileInfo: try: profile = await self.db_pool.simple_select_one( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcols=("displayname", "avatar_url"), desc="get_profileinfo", ) @@ -156,18 +156,18 @@ class ProfileWorkerStore(SQLBaseStore): avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) - async def get_profile_displayname(self, user_localpart: str) -> Optional[str]: + async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: return await self.db_pool.simple_select_one_onecol( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcol="displayname", desc="get_profile_displayname", ) - async def get_profile_avatar_url(self, user_localpart: str) -> Optional[str]: + async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]: return await self.db_pool.simple_select_one_onecol( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcol="avatar_url", desc="get_profile_avatar_url", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5cc786f030..fc190a8b13 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 77 # remember to update the list below when updating +SCHEMA_VERSION = 78 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -103,6 +103,9 @@ Changes in SCHEMA_VERSION = 76: Changes in SCHEMA_VERSION = 77 - (Postgres) Add NOT VALID CHECK (full_user_id IS NOT NULL) to tables profiles and user_filters + +Changes in SCHEMA_VERSION = 78 + - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters """ diff --git a/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py new file mode 100644 index 0000000000..8398d8f548 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py @@ -0,0 +1,92 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: + """ + Part 3 of a multi-step migration to drop the column `user_id` and replace it with + `full_user_id`. See the database schema docs for more information on the full + migration steps. + """ + hostname = config.server.server_name + + if isinstance(database_engine, PostgresEngine): + # check if the constraint can be validated + check_sql = """ + SELECT user_id from profiles WHERE full_user_id IS NULL + """ + cur.execute(check_sql) + res = cur.fetchall() + + if res: + # there are rows the background job missed, finish them here before we validate the constraint + process_rows_sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE user_id IN ( + SELECT user_id FROM profiles WHERE full_user_id IS NULL + ) + """ + cur.execute(process_rows_sql, (f":{hostname}",)) + + # Now we can validate + validate_sql = """ + ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null + """ + cur.execute(validate_sql) + + else: + # in SQLite we need to rewrite the table to add the constraint. + # First drop any temporary table that might be here from a previous failed migration. + cur.execute("DROP TABLE IF EXISTS temp_profiles") + + create_sql = """ + CREATE TABLE temp_profiles ( + full_user_id text NOT NULL, + user_id text, + displayname text, + avatar_url text, + UNIQUE (full_user_id), + UNIQUE (user_id) + ) + """ + cur.execute(create_sql) + + copy_sql = """ + INSERT INTO temp_profiles ( + user_id, + displayname, + avatar_url, + full_user_id) + SELECT user_id, displayname, avatar_url, '@' || user_id || ':' || ? FROM profiles + """ + cur.execute(copy_sql, (f"{hostname}",)) + + drop_sql = """ + DROP TABLE profiles + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_profiles RENAME to profiles + """ + cur.execute(rename_sql) diff --git a/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py new file mode 100644 index 0000000000..8ef63335e7 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py @@ -0,0 +1,95 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
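# What this delta does (the operative code follows): as with the profiles
# delta above, this is part 3 of the `user_id` -> `full_user_id` migration,
# here for the `user_filters` table. On Postgres it first backfills any rows
# the background update missed, building the full ID as
# '@' || user_id || ':' || hostname (so a localpart of 'alice' on
# 'example.com' becomes '@alice:example.com'), then validates the existing
# `full_user_id_not_null` CHECK constraint in place. SQLite cannot validate
# a constraint after the fact, so the table is instead rebuilt with
# `full_user_id` declared NOT NULL (computed during the copy) and swapped in
# via DROP TABLE + ALTER TABLE ... RENAME.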
+ +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: + """ + Part 3 of a multi-step migration to drop the column `user_id` and replace it with + `full_user_id`. See the database schema docs for more information on the full + migration steps. + """ + hostname = config.server.server_name + + if isinstance(database_engine, PostgresEngine): + # check if the constraint can be validated + check_sql = """ + SELECT user_id from user_filters WHERE full_user_id IS NULL + """ + cur.execute(check_sql) + res = cur.fetchall() + + if res: + # there are rows the background job missed, finish them here before we validate constraint + process_rows_sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE user_id IN ( + SELECT user_id FROM user_filters WHERE full_user_id IS NULL + ) + """ + cur.execute(process_rows_sql, (f":{hostname}",)) + + # Now we can validate + validate_sql = """ + ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null + """ + cur.execute(validate_sql) + + else: + cur.execute("DROP TABLE IF EXISTS temp_user_filters") + create_sql = """ + CREATE TABLE temp_user_filters ( + full_user_id text NOT NULL, + user_id text NOT NULL, + filter_id bigint NOT NULL, + filter_json bytea NOT NULL, + UNIQUE (full_user_id), + UNIQUE (user_id) + ) + """ + cur.execute(create_sql) + + index_sql = """ + CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON + temp_user_filters (user_id, filter_id) + """ + cur.execute(index_sql) + + copy_sql = """ + INSERT INTO temp_user_filters ( + user_id, + filter_id, + filter_json, + full_user_id) + SELECT user_id, filter_id, filter_json, '@' || user_id || ':' || ? 
FROM user_filters + """ + cur.execute(copy_sql, (f"{hostname}",)) + + drop_sql = """ + DROP TABLE user_filters + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_user_filters RENAME to user_filters + """ + cur.execute(rename_sql) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index aa6af5ad7b..868f0c6995 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -35,7 +35,6 @@ from tests.events.test_utils import MockEvent user_id = UserID.from_string("@test_user:test") user2_id = UserID.from_string("@test_user2:test") -user_localpart = "test_user" class FilteringTestCase(unittest.HomeserverTestCase): @@ -449,9 +448,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_presence(presence_states)) @@ -479,9 +476,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart + "2", filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user2_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_presence(presence_states)) @@ -498,9 +493,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): events = [event] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_room_state(events=events)) @@ -519,9 +512,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): events = [event] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_room_state(events)) @@ -603,9 +594,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): user_filter_json, ( self.get_success( - self.datastore.get_user_filter( - user_localpart=user_localpart, filter_id=0 - ) + self.datastore.get_user_filter(user_id=user_id, filter_id=0) ) ), ) @@ -620,9 +609,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ) filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) self.assertEqual(filter.get_filter_json(), user_filter_json) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 64a9a22afe..196ceb0b82 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -80,11 +80,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank Jr.", ) @@ -96,11 +92,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank", ) @@ -112,7 +104,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertIsNone( - 
self.get_success(self.store.get_profile_displayname(self.frank.localpart)) + self.get_success(self.store.get_profile_displayname(self.frank)) ) def test_set_my_name_if_disabled(self) -> None: @@ -122,11 +114,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.get_success(self.store.set_profile_displayname(self.frank, "Frank")) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank", ) @@ -201,7 +189,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/pic.gif", ) @@ -215,7 +203,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/me.png", ) @@ -229,7 +217,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertIsNone( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), ) def test_set_my_avatar_if_disabled(self) -> None: @@ -241,7 +229,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/me.png", ) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index bff7114cd8..b3310abe1b 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -28,7 +28,7 @@ from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, notifications, presence, profile, room from synapse.server import HomeServer -from synapse.types import JsonDict, create_requester +from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests.events.test_presence_router import send_presence_update, sync_presence @@ -103,7 +103,9 @@ class ModuleApiTestCase(BaseModuleApiTestCase): self.assertEqual(email["added_at"], 0) # Check that the displayname was assigned - displayname = self.get_success(self.store.get_profile_displayname("bob")) + displayname = self.get_success( + self.store.get_profile_displayname(UserID.from_string("@bob:test")) + ) self.assertEqual(displayname, "Bobberino") def test_can_register_admin_user(self) -> None: diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 9faa9de050..a2d5d340be 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -46,7 +46,9 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {"filter_id": "0"}) filter = self.get_success( - self.store.get_user_filter(user_localpart="apple", filter_id=0) + self.store.get_user_filter( + user_id=UserID.from_string(FilterTestCase.user_id), filter_id=0 + ) ) self.pump() self.assertEqual(filter, self.EXAMPLE_FILTER) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index f9cf0fcb82..fe5bb77913 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer @@ -35,18 +36,14 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( "Frank", - ( - self.get_success( - self.store.get_profile_displayname(self.u_frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.u_frank))), ) # test set to None self.get_success(self.store.set_profile_displayname(self.u_frank, None)) self.assertIsNone( - self.get_success(self.store.get_profile_displayname(self.u_frank.localpart)) + self.get_success(self.store.get_profile_displayname(self.u_frank)) ) def test_avatar_url(self) -> None: @@ -58,18 +55,14 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( "http://my.site/here", - ( - self.get_success( - self.store.get_profile_avatar_url(self.u_frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_avatar_url(self.u_frank))), ) # test set to None self.get_success(self.store.set_profile_avatar_url(self.u_frank, None)) self.assertIsNone( - self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart)) + self.get_success(self.store.get_profile_avatar_url(self.u_frank)) ) def test_profiles_bg_migration(self) -> None: -- cgit 1.5.1 From 8ba530c0e3b157137031d456225b7ba1e0b1627d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:31:41 +0100 Subject: Bump importlib-metadata from 6.1.0 to 6.6.0 (#15711) Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 6.1.0 to 6.6.0. - [Release notes](https://github.com/python/importlib_metadata/releases) - [Changelog](https://github.com/python/importlib_metadata/blob/main/CHANGES.rst) - [Commits](https://github.com/python/importlib_metadata/compare/v6.1.0...v6.6.0) --- updated-dependencies: - dependency-name: importlib-metadata dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index d8964f5719..180f274087 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry and should not be changed by hand. 
[[package]] name = "alabaster" @@ -867,14 +867,14 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.1.0" +version = "6.6.0" description = "Read metadata from Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, - {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, + {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, + {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, ] [package.dependencies] @@ -3424,18 +3424,18 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] +all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["hiredis", "txredisapi"] +redis = ["txredisapi", "hiredis"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["idna", "parameterized"] +test = ["parameterized", "idna"] url-preview = ["lxml"] user-search = ["pyicu"] -- cgit 1.5.1 From 36a5bcae2cf70f5b7dec44e34c10d7e47ee0bcc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:31:54 +0100 Subject: Bump library/redis from 6-bullseye to 7-bullseye in /docker (#15712) Bumps library/redis from 6-bullseye to 7-bullseye. --- updated-dependencies: - dependency-name: library/redis dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docker/Dockerfile-workers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index adb9a725e3..31d6d33407 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -21,7 +21,7 @@ FROM docker.io/library/debian:bullseye-slim AS deps_base # which makes it much easier to copy (but we need to make sure we use an image # based on the same debian version as the synapse image, to make sure we get # the expected version of libc. -FROM docker.io/library/redis:6-bullseye AS redis_base +FROM docker.io/library/redis:7-bullseye AS redis_base # now build the final image, based on the the regular Synapse docker image FROM $FROM -- cgit 1.5.1 From 5feabbdf062d16577f697fed41687c7bffc60c49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:07 +0100 Subject: Bump pyasn1 from 0.4.8 to 0.5.0 (#15713) Bumps [pyasn1](https://github.com/pyasn1/pyasn1) from 0.4.8 to 0.5.0. 
- [Release notes](https://github.com/pyasn1/pyasn1/releases) - [Changelog](https://github.com/pyasn1/pyasn1/blob/main/CHANGES.rst) - [Commits](https://github.com/pyasn1/pyasn1/compare/v0.4.8...v0.5.0) --- updated-dependencies: - dependency-name: pyasn1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 180f274087..d2fc2c1c9c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1863,14 +1863,14 @@ psycopg2 = "*" [[package]] name = "pyasn1" -version = "0.4.8" -description = "ASN.1 types and codecs" +version = "0.5.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" category = "main" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, + {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, + {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, ] [[package]] -- cgit 1.5.1 From 1a7aa81715609555cb4d0a7e3cad262b9c234007 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:16 +0100 Subject: Bump sentry-sdk from 1.22.1 to 1.25.0 (#15714) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.22.1 to 1.25.0. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.22.1...1.25.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index d2fc2c1c9c..9f91857475 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2397,19 +2397,19 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.22.1" +version = "1.25.0" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.22.1.tar.gz", hash = "sha256:052dff5069c6f0d836ee014323576824a9b40836fc003fb12489a1f19c60a3c9"}, - {file = "sentry_sdk-1.22.1-py2.py3-none-any.whl", hash = "sha256:c6c6946f8c927adb00af1c5ab6921df38775b2199b9003816d5935a1310352d5"}, + {file = "sentry-sdk-1.25.0.tar.gz", hash = "sha256:5be3296fc574fa8a4d9b213b4dcf8c8d0246c08f8bd78315c6286f386c37555a"}, + {file = "sentry_sdk-1.25.0-py2.py3-none-any.whl", hash = "sha256:fe85cf5d0b3d0aa3480df689f9f6dc487de783defb0a95043368375dc893645e"}, ] [package.dependencies] certifi = "*" -urllib3 = {version = ">=1.26.11,<2.0.0", markers = "python_version >= \"3.6\""} +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] @@ -2421,10 +2421,11 @@ chalice = ["chalice (>=1.16.0)"] django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] -flask = ["blinker (>=1.1)", "flask (>=0.11)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] -- cgit 1.5.1 From 2d97d5b1c359c2a1783365c0db035f17d512dc4c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:25 +0100 Subject: Bump types-jsonschema from 4.17.0.7 to 4.17.0.8 (#15716) Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.7 to 4.17.0.8. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9f91857475..c94daa6cef 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3038,14 +3038,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.7" +version = "4.17.0.8" description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.7.tar.gz", hash = "sha256:130e57c5f1ca755f95775d0822ad7a3907294e1461306af54baf804f317fd54c"}, - {file = "types_jsonschema-4.17.0.7-py3-none-any.whl", hash = "sha256:e129b52be6df841d97a98f087631dd558f7812eb91ff7b733c3301bd2446271b"}, + {file = "types-jsonschema-4.17.0.8.tar.gz", hash = "sha256:96a56990910f405e62de58862c0bbb3ac29ee6dba6d3d99aa0ba7f874cc547de"}, + {file = "types_jsonschema-4.17.0.8-py3-none-any.whl", hash = "sha256:f5958eb7b53217dfb5125f0412aeaef226a8a9013eac95816c95b5b523f6796b"}, ] [[package]] -- cgit 1.5.1 From ca8906be2cb821a0fb49ad1adf8440e79e64a398 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:39:34 +0100 Subject: Bump types-requests from 2.31.0.0 to 2.31.0.1 (#15715) Bumps [types-requests](https://github.com/python/typeshed) from 2.31.0.0 to 2.31.0.1. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index c94daa6cef..1f5cb3a3a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3125,14 +3125,14 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.0" +version = "2.31.0.1" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"}, - {file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"}, + {file = "types-requests-2.31.0.1.tar.gz", hash = "sha256:3de667cffa123ce698591de0ad7db034a5317457a596eb0b4944e5a9d9e8d1ac"}, + {file = "types_requests-2.31.0.1-py3-none-any.whl", hash = "sha256:afb06ef8f25ba83d59a1d424bd7a5a939082f94b94e90ab5e6116bd2559deaa3"}, ] [package.dependencies] -- cgit 1.5.1 From f9561b9e37e4cbd97a71dd10549f1f03d3f01b5e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 5 Jun 2023 23:38:52 -0500 Subject: Some house keeping on `maybe_backfill()` functions (#15709) --- changelog.d/15709.misc | 1 + synapse/handlers/federation.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/15709.misc diff --git a/changelog.d/15709.misc b/changelog.d/15709.misc new file mode 100644 index 0000000000..e9ce84a940 --- /dev/null +++ b/changelog.d/15709.misc @@ -0,0 +1 @@ +Update docstring and traces on `maybe_backfill()` functions. 
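The federation.py diff below adds `@tag_args` alongside the existing `@trace` on `maybe_backfill`, applies both decorators to `_maybe_backfill_inner`, and records the sorted backfill points as result tags on the span. As a rough, self-contained sketch of how such stacked decorators compose (names and behaviour here are purely illustrative; Synapse's real decorators live in `synapse.logging.opentracing` and appear in a later commit in this log), the outer decorator opens the span before the inner one attaches the argument tags:

import asyncio
import functools
from typing import Any, Callable

def trace(func: Callable[..., Any]) -> Callable[..., Any]:
    # Open a "span" named after the function for the duration of the call.
    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        print(f"start span: {func.__name__}")
        try:
            return await func(*args, **kwargs)
        finally:
            print(f"finish span: {func.__name__}")
    return wrapper

def tag_args(func: Callable[..., Any]) -> Callable[..., Any]:
    # Record the call's arguments as tags on the (notional) current span.
    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        print(f"set tags: args={args!r} kwargs={kwargs!r}")
        return await func(*args, **kwargs)
    return wrapper

@trace
@tag_args
async def maybe_backfill(room_id: str, current_depth: int, limit: int) -> bool:
    return False  # True would mean a backfill was actually attempted

asyncio.run(maybe_backfill("!room:example.com", current_depth=10, limit=100))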
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2eb28d55ac..57d6b70cff 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -200,6 +200,7 @@ class FederationHandler: ) @trace + @tag_args async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: @@ -214,6 +215,9 @@ class FederationHandler: limit: The number of events that the pagination request will return. This is used as part of the heuristic to decide if we should back paginate. + + Returns: + True if we actually tried to backfill something, otherwise False. """ # Starting the processing time here so we can include the room backfill # linearizer lock queue in the timing @@ -227,6 +231,8 @@ class FederationHandler: processing_start_time=processing_start_time, ) + @trace + @tag_args async def _maybe_backfill_inner( self, room_id: str, @@ -247,6 +253,9 @@ class FederationHandler: limit: The max number of events to request from the remote federated server. processing_start_time: The time when `maybe_backfill` started processing. Only used for timing. If `None`, no timing observation will be made. + + Returns: + True if we actually tried to backfill something, otherwise False. """ backwards_extremities = [ _BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY) @@ -302,6 +311,14 @@ class FederationHandler: len(sorted_backfill_points), sorted_backfill_points, ) + set_tag( + SynapseTags.RESULT_PREFIX + "sorted_backfill_points", + str(sorted_backfill_points), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "sorted_backfill_points.length", + str(len(sorted_backfill_points)), + ) # If we have no backfill points lower than the `current_depth` then # either we can a) bail or b) still attempt to backfill. We opt to try -- cgit 1.5.1 From f880e64b11bd03d1ebd710b34b541d5b2e044baa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 6 Jun 2023 04:11:07 -0400 Subject: Stabilize support for MSC3952: Intentional mentions. (#15520) --- changelog.d/15520.feature | 1 + rust/benches/evaluator.rs | 3 --- rust/src/push/base_rules.rs | 8 +++---- rust/src/push/evaluator.rs | 10 ++++----- rust/src/push/mod.rs | 7 ------ stubs/synapse/synapse_rust/push.pyi | 1 - synapse/api/constants.py | 2 +- synapse/config/experimental.py | 5 ----- synapse/events/validator.py | 9 ++------ synapse/push/bulk_push_rule_evaluator.py | 8 +------ synapse/rest/client/versions.py | 2 -- synapse/storage/databases/main/push_rule.py | 1 - tests/push/test_bulk_push_rule_evaluator.py | 34 +++++++++++------------------ 13 files changed, 27 insertions(+), 64 deletions(-) create mode 100644 changelog.d/15520.feature diff --git a/changelog.d/15520.feature b/changelog.d/15520.feature new file mode 100644 index 0000000000..f4fd40ab94 --- /dev/null +++ b/changelog.d/15520.feature @@ -0,0 +1 @@ +Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 64e13f6486..c2f33258a4 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -13,8 +13,6 @@ // limitations under the License. 
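// Context for the changes that follow in this commit: stabilising MSC3952
// renames the unstable `org.matrix.msc3952.mentions` event property to the
// stable `m.mentions` and drops the per-homeserver experimental gate, so
// the evaluator and FilteredPushRules constructors below each lose one
// boolean flag. A hypothetical message content using the stable field
// (values purely illustrative) would look like:
//   {"msgtype": "m.text", "body": "hi",
//    "m.mentions": {"user_ids": ["@alice:example.org"], "room": false}}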
#![feature(test)] -use std::collections::BTreeSet; - use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, PushRules, SimpleJsonValue, @@ -197,7 +195,6 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, - false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 51372e1553..9d6c304d92 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -142,11 +142,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), + rule_id: Cow::Borrowed("global/override/.m.is_user_mention"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { - key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"), + key: Cow::Borrowed("content.m\\.mentions.user_ids"), value_type: Cow::Borrowed(&EventMatchPatternType::UserId), }), )]), @@ -163,11 +163,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), + rule_id: Cow::Borrowed("global/override/.m.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { - key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"), + key: Cow::Borrowed("content.m\\.mentions.room"), value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), })), Condition::Known(KnownCondition::SenderNotificationPermission { diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 2d7c4c06be..59c53b1776 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -70,7 +70,9 @@ pub struct PushRuleEvaluator { /// The "content.body", if any. body: String, - /// True if the event has a mentions property and MSC3952 support is enabled. + /// True if the event has a m.mentions property. (Note that this is a separate + /// flag instead of checking flattened_keys since the m.mentions property + /// might be an empty map and not appear in flattened_keys. has_mentions: bool, /// The number of users in the room. @@ -155,9 +157,7 @@ impl PushRuleEvaluator { let rule_id = &push_rule.rule_id().to_string(); // For backwards-compatibility the legacy mention rules are disabled - // if the event contains the 'm.mentions' property (and if the - // experimental feature is enabled, both of these are represented - // by the has_mentions flag). + // if the event contains the 'm.mentions' property. 
if self.has_mentions && (rule_id == "global/override/.m.rule.contains_display_name" || rule_id == "global/content/.m.rule.contains_user_name" @@ -562,7 +562,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index f19d3c739f..514980579b 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -527,7 +527,6 @@ pub struct FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, } @@ -540,7 +539,6 @@ impl FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, ) -> Self { Self { @@ -549,7 +547,6 @@ impl FilteredPushRules { msc1767_enabled, msc3381_polls_enabled, msc3664_enabled, - msc3952_intentional_mentions, msc3958_suppress_edits_enabled, } } @@ -587,10 +584,6 @@ impl FilteredPushRules { return false; } - if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952") - { - return false; - } if !self.msc3958_suppress_edits_enabled && rule.rule_id == "global/override/.com.beeper.suppress_edits" { diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 5d0ce4b1a4..d573a37b9a 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -46,7 +46,6 @@ class FilteredPushRules: msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... diff --git a/synapse/api/constants.py b/synapse/api/constants.py index cde9a2ecef..faf0770c66 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -236,7 +236,7 @@ class EventContentFields: AUTHORISING_USER: Final = "join_authorised_via_users_server" # Use for mentioning users. - MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions" + MENTIONS: Final = "m.mentions" # an unspecced field added to to-device messages to identify them uniquely-ish TO_DEVICE_MSGID: Final = "org.matrix.msgid" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index a9e002cf08..1d5b5ded45 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -358,11 +358,6 @@ class ExperimentalConfig(Config): # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3952: Intentional mentions, this depends on MSC3966. - self.msc3952_intentional_mentions = experimental.get( - "msc3952_intentional_mentions", False - ) - # MSC3959: Do not generate notifications for edits. self.msc3958_supress_edit_notifs = experimental.get( "msc3958_supress_edit_notifs", False diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 47203209db..9278f1a1aa 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -134,13 +134,8 @@ class EventValidator: ) # If the event contains a mentions key, validate it. 
- if ( - EventContentFields.MSC3952_MENTIONS in event.content - and config.experimental.msc3952_intentional_mentions - ): - validate_json_object( - event.content[EventContentFields.MSC3952_MENTIONS], Mentions - ) + if EventContentFields.MENTIONS in event.content: + validate_json_object(event.content[EventContentFields.MENTIONS], Mentions) def _validate_retention(self, event: EventBase) -> None: """Checks that an event that defines the retention policy for a room respects the diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 320084f5f5..33002cc0f2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -120,9 +120,6 @@ class BulkPushRuleEvaluator: self.should_calculate_push_rules = self.hs.config.push.enable_push self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled - self._intentional_mentions_enabled = ( - self.hs.config.experimental.msc3952_intentional_mentions - ) self.room_push_rule_cache_metrics = register_cache( "cache", @@ -390,10 +387,7 @@ class BulkPushRuleEvaluator: del notification_levels[key] # Pull out any user and room mentions. - has_mentions = ( - self._intentional_mentions_enabled - and EventContentFields.MSC3952_MENTIONS in event.content - ) + has_mentions = EventContentFields.MENTIONS in event.content evaluator = PushRuleEvaluator( _flatten_dict(event), diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 547bf34df1..1910648755 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -124,8 +124,6 @@ class VersionsRestServlet(RestServlet): is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, - # Adds support for unstable "intentional mentions" behaviour. - "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, # Whether recursively provide relations is supported. "org.matrix.msc3981": self.config.experimental.msc3981_recurse_relations, # Adds support for deleting account data. diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9f862f00c1..e098ceea3c 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -88,7 +88,6 @@ def _load_rules( msc1767_enabled=experimental_config.msc1767_enabled, msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, - msc3952_intentional_mentions=experimental_config.msc3952_intentional_mentions, msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs, ) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 9501096a77..1e06f86071 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -228,7 +228,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) return len(result) > 0 - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_user_mentions(self) -> None: """Test the behavior of an event which includes invalid user mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -237,9 +236,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse(self._create_and_process(bulk_evaluator)) # An empty mentions field should not notify. 
self.assertFalse( - self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {}} - ) + self._create_and_process(bulk_evaluator, {EventContentFields.MENTIONS: {}}) ) # Non-dict mentions should be ignored. @@ -253,7 +250,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): for mentions in (None, True, False, 1, "foo", []): self.assertFalse( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions} + bulk_evaluator, {EventContentFields.MENTIONS: mentions} ) ) @@ -262,7 +259,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}}, + {EventContentFields.MENTIONS: {"user_ids": mentions}}, ) ) @@ -270,14 +267,14 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertTrue( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"user_ids": [self.alice]}}, + {EventContentFields.MENTIONS: {"user_ids": [self.alice]}}, ) ) self.assertTrue( self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": ["@another:test", self.alice] } }, @@ -288,11 +285,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertTrue( self._create_and_process( bulk_evaluator, - { - EventContentFields.MSC3952_MENTIONS: { - "user_ids": [self.alice, self.alice] - } - }, + {EventContentFields.MENTIONS: {"user_ids": [self.alice, self.alice]}}, ) ) @@ -307,7 +300,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": [None, True, False, {}, []] } }, @@ -317,7 +310,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": [None, True, False, {}, [], self.alice] } }, @@ -331,12 +324,11 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): { "body": self.alice, "msgtype": "m.text", - EventContentFields.MSC3952_MENTIONS: {}, + EventContentFields.MENTIONS: {}, }, ) ) - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_room_mentions(self) -> None: """Test the behavior of an event which includes invalid room mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -344,7 +336,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): # Room mentions from those without power should not notify. 
self.assertFalse( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}} ) ) @@ -358,7 +350,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) self.assertTrue( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}} ) ) @@ -374,7 +366,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"room": mentions}}, + {EventContentFields.MENTIONS: {"room": mentions}}, ) ) @@ -385,7 +377,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): { "body": "@room", "msgtype": "m.text", - EventContentFields.MSC3952_MENTIONS: {}, + EventContentFields.MENTIONS: {}, }, ) ) -- cgit 1.5.1 From 564f37aca6fdf404edc65031f90bbf9385794ae2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 09:55:42 +0100 Subject: 1.85.0 --- CHANGES.md | 21 +++++++++++++++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f0885a2f1e..100ce99270 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.85.0 (2023-06-06) +=========================== + +No significant changes since 1.85.0rc2. + + +## Security advisory + +The following issues are fixed in 1.85.0. + +- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity + + It may be possible for a deactivated user to login when using uncommon configurations. + +- [GHSA-98px-6486-j7qc](https://github.com/matrix-org/synapse/security/advisories/GHSA-98px-6486-j7qc) / [CVE-2023-32683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity + + A discovered oEmbed or image URL can bypass the `url_preview_url_blacklist` setting potentially allowing server side request forgery or bypassing network policies. Impact is limited to IP addresses allowed by the `url_preview_ip_range_blacklist` setting (by default this only allows public IPs). + +See the advisories for more details. If you have any questions, email security@matrix.org. + + Synapse 1.85.0rc2 (2023-06-01) ============================== diff --git a/debian/changelog b/debian/changelog index ae348ce4df..2278a83283 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0) stable; urgency=medium + + * New Synapse release 1.85.0. + + -- Synapse Packaging team Tue, 06 Jun 2023 09:39:29 +0100 + matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium * New Synapse release 1.85.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index 4ed4214f34..745b58d7b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0rc2" +version = "1.85.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From ec71214243eac58a4a6d272c15441a6405f6ae9c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 10:06:21 +0100 Subject: Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 100ce99270..ea13b554ba 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ No significant changes since 1.85.0rc2. ## Security advisory -The following issues are fixed in 1.85.0. +The following issues are fixed in 1.85.0 (and RCs). - [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity -- cgit 1.5.1 From ad690037de0708d932380e3759d57ef3cc981345 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 10:58:32 +0100 Subject: Fix link in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ea13b554ba..905713b2af 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ No significant changes since 1.85.0rc2. The following issues are fixed in 1.85.0 (and RCs). -- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity +- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32682) — Low Severity It may be possible for a deactivated user to login when using uncommon configurations. -- cgit 1.5.1 From dfd77f426e3e4a66dd027db7078ed0345a4c74dd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 6 Jun 2023 12:32:29 +0100 Subject: Remove some unused `server_name` fields (#15723) Signed-off-by: Sean Quah --- changelog.d/15723.misc | 1 + synapse/handlers/presence.py | 1 - synapse/handlers/read_marker.py | 1 - synapse/handlers/room.py | 1 - synapse/handlers/stats.py | 1 - synapse/rest/media/upload_resource.py | 1 - 6 files changed, 1 insertion(+), 5 deletions(-) create mode 100644 changelog.d/15723.misc diff --git a/changelog.d/15723.misc b/changelog.d/15723.misc new file mode 100644 index 0000000000..ba331adca7 --- /dev/null +++ b/changelog.d/15723.misc @@ -0,0 +1 @@ +Removed some unused fields. 
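The diffs below simply delete handler attributes that were assigned in constructors but never read. As a toy sketch of how such dead stores can be spotted mechanically (illustrative only, not how this commit was produced; a real check would also have to rule out readers in other modules), attribute writes on `self` can be compared against attribute reads within an abridged class body:

import ast

SOURCE = '''
class ReadMarkerHandler:
    def __init__(self, hs):
        self.server_name = hs.config.server.server_name
        self.store = hs.get_datastores().main

    def run(self):
        return self.store
'''

assigned = set()
read = set()
for node in ast.walk(ast.parse(SOURCE)):
    if (
        isinstance(node, ast.Attribute)
        and isinstance(node.value, ast.Name)
        and node.value.id == "self"
    ):
        if isinstance(node.ctx, ast.Store):
            assigned.add(node.attr)
        else:
            read.add(node.attr)

# `server_name` is written but never read in this class body.
print("possibly unused:", assigned - read)  # -> {'server_name'}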
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4ad2233573..0a219b7962 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -648,7 +648,6 @@ class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self.server_name = hs.hostname self.wheel_timer: WheelTimer[str] = WheelTimer() self.notifier = hs.get_notifier() self._presence_enabled = hs.config.server.use_presence diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 49a497a860..df5a4f3e22 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -27,7 +27,6 @@ logger = logging.getLogger(__name__) class ReadMarkerHandler: def __init__(self, hs: "HomeServer"): - self.server_name = hs.config.server.server_name self.store = hs.get_datastores().main self.account_data_handler = hs.get_account_data_handler() self.read_marker_linearizer = Linearizer(name="read_marker") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 5e1702d78a..cb957f2033 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1490,7 +1490,6 @@ class RoomContextHandler: class TimestampLookupHandler: def __init__(self, hs: "HomeServer"): - self.server_name = hs.hostname self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.federation_client = hs.get_federation_client() diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 5c01482acf..7cabf7980a 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -42,7 +42,6 @@ class StatsHandler: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self.state = hs.get_state_handler() - self.server_name = hs.hostname self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 697348613b..043e8d6077 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -39,7 +39,6 @@ class UploadResource(DirectServeJsonResource): self.filepaths = media_repo.filepaths self.store = hs.get_datastores().main self.clock = hs.get_clock() - self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.media.max_upload_size self.clock = hs.get_clock() -- cgit 1.5.1 From d43c72a6c85ab7cf7391f1b716dfd57f8fd0bf3d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Jun 2023 19:29:54 +0100 Subject: Prevent "twisted trunk" and "latest deps" workflows from running on forks (#15726) --- .github/workflows/latest_deps.yml | 23 +++++++++++++++++++++-- .github/workflows/twisted_trunk.yml | 24 ++++++++++++++++++++++-- changelog.d/15726.misc | 1 + 3 files changed, 44 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15726.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 452600ba16..ec6391cf8f 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -22,7 +22,21 @@ concurrency: cancel-in-progress: true jobs: + check_repo: + # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is + # only useful to the Synapse core team. + # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest + # of the workflow will be skipped as well. 
+ runs-on: ubuntu-latest + outputs: + should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }} + steps: + - id: check_condition + run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT" + mypy: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -47,6 +61,8 @@ jobs: run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest strategy: matrix: @@ -105,6 +121,8 @@ jobs: sytest: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: image: matrixdotorg/sytest-synapse:testing @@ -156,7 +174,8 @@ jobs: complement: - if: "${{ !failure() && !cancelled() }}" + needs: check_repo + if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'" runs-on: ubuntu-latest strategy: @@ -192,7 +211,7 @@ jobs: # Open an issue if the build fails, so we know about it. # Only do this if we're not experimenting with this action in a PR. open-issue: - if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'" + if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request' && needs.check_repo.outputs.should_run_workflow == 'true'" needs: # TODO: should mypy be included here? It feels more brittle than the others. - mypy diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 14fc6a0389..55081f8133 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,22 @@ concurrency: cancel-in-progress: true jobs: + check_repo: + # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is + # only useful to the Synapse core team. + # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest + # of the workflow will be skipped as well. + if: github.repository == 'matrix-org/synapse' + runs-on: ubuntu-latest + outputs: + should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }} + steps: + - id: check_condition + run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT" + mypy: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: @@ -41,6 +56,8 @@ jobs: - run: poetry run mypy trial: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: @@ -75,6 +92,8 @@ jobs: || true sytest: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: image: matrixdotorg/sytest-synapse:buster @@ -119,7 +138,8 @@ jobs: /logs/**/*.log* complement: - if: "${{ !failure() && !cancelled() }}" + needs: check_repo + if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'" runs-on: ubuntu-latest strategy: @@ -166,7 +186,7 @@ jobs: # open an issue if the build fails, so we know about it. 
open-issue: - if: failure() + if: failure() && needs.check_repo.outputs.should_run_workflow == 'true' needs: - mypy - trial diff --git a/changelog.d/15726.misc b/changelog.d/15726.misc new file mode 100644 index 0000000000..941e541e77 --- /dev/null +++ b/changelog.d/15726.misc @@ -0,0 +1 @@ +Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. \ No newline at end of file -- cgit 1.5.1 From 6ee96e936646d6ccc55dc076f62f8cf518c90d1e Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 6 Jun 2023 13:16:03 -0700 Subject: Improve performance of user directory search (#15729) --- changelog.d/15729.misc | 1 + synapse/storage/databases/main/user_directory.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15729.misc diff --git a/changelog.d/15729.misc b/changelog.d/15729.misc new file mode 100644 index 0000000000..3940254305 --- /dev/null +++ b/changelog.d/15729.misc @@ -0,0 +1 @@ +Improve performance of user directory search. diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index a0319575f0..b0a06baf4f 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -1061,12 +1061,15 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # The array of numbers are the weights for the various part of the # search: (domain, _, display name, localpart) sql = """ + WITH matching_users AS ( + SELECT user_id, vector FROM user_directory_search WHERE vector @@ to_tsquery('simple', ?) + LIMIT 10000 + ) SELECT d.user_id AS user_id, display_name, avatar_url - FROM user_directory_search as t + FROM matching_users as t INNER JOIN user_directory AS d USING (user_id) WHERE %(where_clause)s - AND vector @@ to_tsquery('simple', ?) ORDER BY (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END) * (CASE WHEN display_name IS NOT NULL THEN 1.2 ELSE 1.0 END) @@ -1095,8 +1098,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): "order_case_statements": " ".join(additional_ordering_statements), } args = ( - join_args - + (full_query, exact_query, prefix_query) + (full_query,) + + join_args + + (exact_query, prefix_query) + ordering_arguments + (limit + 1,) ) -- cgit 1.5.1 From 33c3550887f412f015cf651db82a9082bb12cd9e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jun 2023 16:25:03 -0500 Subject: Add context for when/why to use the `long_retries` option when sending Federation requests (#15721) --- changelog.d/15721.misc | 1 + synapse/http/matrixfederationclient.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15721.misc diff --git a/changelog.d/15721.misc b/changelog.d/15721.misc new file mode 100644 index 0000000000..f4d892daf9 --- /dev/null +++ b/changelog.d/15721.misc @@ -0,0 +1 @@ +Add context for when/why to use the `long_retries` option when sending Federation requests. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 9094dab0fe..abb5ae5815 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -499,8 +499,15 @@ class MatrixFederationHttpClient: Note that the above intervals are *in addition* to the time spent waiting for the request to complete (up to `timeout` ms). - NB: the long retry algorithm takes over 20 minutes to complete, with - a default timeout of 60s! 
+        NB: the long retry algorithm takes over 20 minutes to complete, with a
+        default timeout of 60s! It's best not to use the `long_retries` option
+        for anything that blocks a client, so that we don't make them wait for
+        ages. For background work, such as sending transactions (server to
+        server), we can afford to be a lot more lenient, though the
+        distinction is admittedly fuzzy / hand-wavy.
+
+        In the future, we could make this decision more intelligently, with
+        the bigger picture in mind:
+        https://github.com/matrix-org/synapse/issues/8917

            ignore_backoff: true to ignore the historical backoff data and
                try the request anyway.
-- cgit 1.5.1


From 4e6390cb10676d3f621319663587f49baa57bedc Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 6 Jun 2023 16:26:12 -0500
Subject: Update error to more plainly explain we can only authorize our own events (#15725)

---
 changelog.d/15725.misc                  | 1 +
 synapse/federation/federation_server.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15725.misc

diff --git a/changelog.d/15725.misc b/changelog.d/15725.misc
new file mode 100644
index 0000000000..6c7a8a41d8
--- /dev/null
+++ b/changelog.d/15725.misc
@@ -0,0 +1 @@
+Update federation error to more plainly explain we can only authorise our own membership events.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 149351dda0..9425b32507 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -944,7 +944,7 @@ class FederationServer(FederationBase):
         if not self._is_mine_server_name(authorising_server):
             raise SynapseError(
                 400,
-                f"Cannot authorise request from resident server: {authorising_server}",
+                f"Cannot authorise membership event for {authorising_server}. We can only authorise requests from our own homeserver",
             )
 
         event.signatures.update(
-- cgit 1.5.1


From 8bfded81f3378ab6333f174e182f2aae6ef01f49 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 6 Jun 2023 17:39:22 -0500
Subject: Trace functions which return `Awaitable` (#15650)

---
 changelog.d/15650.misc            |  1 +
 synapse/logging/opentracing.py    | 37 +++++++++++++++++++++++----------
 tests/logging/test_opentracing.py | 43 +++++++++++++++++++++++++++++----------
 3 files changed, 59 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/15650.misc

diff --git a/changelog.d/15650.misc b/changelog.d/15650.misc
new file mode 100644
index 0000000000..9bbad113e1
--- /dev/null
+++ b/changelog.d/15650.misc
@@ -0,0 +1 @@
+Add support for tracing functions which return `Awaitable`s.
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index c70eee649c..75217e3f45 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -171,6 +171,7 @@ from functools import wraps
 from typing import (
     TYPE_CHECKING,
     Any,
+    Awaitable,
     Callable,
     Collection,
     ContextManager,
@@ -903,6 +904,7 @@ def _custom_sync_async_decorator(
     """
 
     if inspect.iscoroutinefunction(func):
+        # For this branch, we handle async functions like `async def func() -> RInner`.
         # In this branch, R = Awaitable[RInner], for some other type RInner
         @wraps(func)
         async def _wrapper(
@@ -914,15 +916,16 @@ def _custom_sync_async_decorator(
             return await func(*args, **kwargs)  # type: ignore[misc]
 
     else:
-        # The other case here handles both sync functions and those
-        # decorated with inlineDeferred.
+ # The other case here handles sync functions including those decorated with + # `@defer.inlineCallbacks` or that return a `Deferred` or other `Awaitable`. @wraps(func) - def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Any: scope = wrapping_logic(func, *args, **kwargs) scope.__enter__() try: result = func(*args, **kwargs) + if isinstance(result, defer.Deferred): def call_back(result: R) -> R: @@ -930,20 +933,32 @@ def _custom_sync_async_decorator( return result def err_back(result: R) -> R: + # TODO: Pass the error details into `scope.__exit__(...)` for + # consistency with the other paths. scope.__exit__(None, None, None) return result result.addCallbacks(call_back, err_back) + elif inspect.isawaitable(result): + + async def wrap_awaitable() -> Any: + try: + assert isinstance(result, Awaitable) + awaited_result = await result + scope.__exit__(None, None, None) + return awaited_result + except Exception as e: + scope.__exit__(type(e), None, e.__traceback__) + raise + + # The original method returned an awaitable, eg. a coroutine, so we + # create another awaitable wrapping it that calls + # `scope.__exit__(...)`. + return wrap_awaitable() else: - if inspect.isawaitable(result): - logger.error( - "@trace may not have wrapped %s correctly! " - "The function is not async but returned a %s.", - func.__qualname__, - type(result).__name__, - ) - + # Just a simple sync function so we can just exit the scope and + # return the result without any fuss. scope.__exit__(None, None, None) return result diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index e28ba84cc2..1bc7d64ad9 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import cast +from typing import Awaitable, cast from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactorClock @@ -227,8 +227,6 @@ class LogContextScopeManagerTestCase(TestCase): Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` with functions that return deferreds """ - reactor = MemoryReactorClock() - with LoggingContext("root context"): @trace_with_opname("fixture_deferred_func", tracer=self._tracer) @@ -240,9 +238,6 @@ class LogContextScopeManagerTestCase(TestCase): result_d1 = fixture_deferred_func() - # let the tasks complete - reactor.pump((2,) * 8) - self.assertEqual(self.successResultOf(result_d1), "foo") # the span should have been reported @@ -256,8 +251,6 @@ class LogContextScopeManagerTestCase(TestCase): Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` with async functions """ - reactor = MemoryReactorClock() - with LoggingContext("root context"): @trace_with_opname("fixture_async_func", tracer=self._tracer) @@ -267,9 +260,6 @@ class LogContextScopeManagerTestCase(TestCase): d1 = defer.ensureDeferred(fixture_async_func()) - # let the tasks complete - reactor.pump((2,) * 8) - self.assertEqual(self.successResultOf(d1), "foo") # the span should have been reported @@ -277,3 +267,34 @@ class LogContextScopeManagerTestCase(TestCase): [span.operation_name for span in self._reporter.get_spans()], ["fixture_async_func"], ) + + def test_trace_decorator_awaitable_return(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with functions that return an awaitable (e.g. 
a coroutine) + """ + with LoggingContext("root context"): + # Something we can return without `await` to get a coroutine + async def fixture_async_func() -> str: + return "foo" + + # The actual kind of function we want to test that returns an awaitable + @trace_with_opname("fixture_awaitable_return_func", tracer=self._tracer) + @tag_args + def fixture_awaitable_return_func() -> Awaitable[str]: + return fixture_async_func() + + # Something we can run with `defer.ensureDeferred(runner())` and pump the + # whole async tasks through to completion. + async def runner() -> str: + return await fixture_awaitable_return_func() + + d1 = defer.ensureDeferred(runner()) + + self.assertEqual(self.successResultOf(d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.operation_name for span in self._reporter.get_spans()], + ["fixture_awaitable_return_func"], + ) -- cgit 1.5.1 From 9d911b0da651893e0b67cb3506e18582cb0d95b5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jun 2023 22:19:57 -0500 Subject: No need for the extra join since `membership` is built-in to `current_state_events` (#15731) This helps with the upstream `is_host_joined()` and `is_host_invited()` functions. `membership` was added to `current_state_events` in https://github.com/matrix-org/synapse/pull/5706 and forced in https://github.com/matrix-org/synapse/pull/13745 --- changelog.d/15731.misc | 1 + synapse/storage/databases/main/roommember.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15731.misc diff --git a/changelog.d/15731.misc b/changelog.d/15731.misc new file mode 100644 index 0000000000..906bc26962 --- /dev/null +++ b/changelog.d/15731.misc @@ -0,0 +1 @@ +Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ae9c201b87..1b8ec67f54 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -927,11 +927,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): raise Exception("Invalid host name") sql = """ - SELECT state_key FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (event_id) - WHERE m.membership = ? + SELECT state_key FROM current_state_events + WHERE membership = ? AND type = 'm.room.member' - AND c.room_id = ? + AND room_id = ? AND state_key LIKE ? LIMIT 1 """ -- cgit 1.5.1 From a701c089fa2a345243985a765506a52b50e50963 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 10:50:32 +0100 Subject: Fix schema delta error in 1.85 (#15738) There appears to be a race where you can end up with entries in `event_push_summary` with both a `NULL` and `main` thread ID. Fixes #15736 Introduced in #15597 --- changelog.d/15738.bugfix | 1 + .../schema/main/delta/77/05thread_notifications_backfill.sql | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/15738.bugfix diff --git a/changelog.d/15738.bugfix b/changelog.d/15738.bugfix new file mode 100644 index 0000000000..7129ab0782 --- /dev/null +++ b/changelog.d/15738.bugfix @@ -0,0 +1 @@ +Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. 
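The schema delta below deletes `event_push_summary` rows whose thread ID is `NULL` whenever a `main` counterpart already exists for the same user and room. As an illustration only, here is a minimal, runnable sketch of that `DELETE ... WHERE EXISTS` anti-join, assuming a pared-down version of the table (the real one carries more columns):

```python
import sqlite3

# Illustration of the DELETE ... WHERE EXISTS anti-join used by the delta;
# the real event_push_summary table has more columns than this sketch.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE event_push_summary (user_id TEXT, room_id TEXT, thread_id TEXT)"
)
conn.executemany(
    "INSERT INTO event_push_summary VALUES (?, ?, ?)",
    [
        ("@a:hs", "!room", None),    # NULL row *with* a 'main' twin: deleted
        ("@a:hs", "!room", "main"),
        ("@b:hs", "!room", None),    # NULL row without a twin: kept
    ],
)
conn.execute(
    """
    DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS (
        SELECT 1 FROM event_push_summary AS b
        WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id
    )
    """
)
rows = conn.execute(
    "SELECT user_id, thread_id FROM event_push_summary ORDER BY user_id"
).fetchall()
print(rows)  # [('@a:hs', 'main'), ('@b:hs', None)]
```

The surviving `NULL` row (here, `@b:hs`) is exactly what the delta's final `UPDATE ... SET thread_id = 'main'` then rewrites.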
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql index ce6f9ff937..b09aa817ae 100644 --- a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql +++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql @@ -21,6 +21,14 @@ DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_i -- Overwrite any null thread_id values. UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL; UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL; + +-- Empirically we can end up with entries in the push summary table with both a +-- `NULL` and `main` thread ID, which causes the update below to fail. We fudge +-- this by deleting any `NULL` rows that have a corresponding `main`. +DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS ( + SELECT 1 FROM event_push_summary AS b + WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id +); UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL; -- Drop the background updates to calculate the indexes used to find null thread_ids. -- cgit 1.5.1 From 7acf7f2f8df9726c961b392f21ee7a92d062fb39 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 10:51:17 +0100 Subject: 1.85.1 --- CHANGES.md | 9 +++++++++ changelog.d/15738.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15738.bugfix diff --git a/CHANGES.md b/CHANGES.md index ea13b554ba..81bf3cc110 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.85.1 (2023-06-07) +=========================== + +Bugfixes +-------- + +- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738)) + + Synapse 1.85.0 (2023-06-06) =========================== diff --git a/changelog.d/15738.bugfix b/changelog.d/15738.bugfix deleted file mode 100644 index 7129ab0782..0000000000 --- a/changelog.d/15738.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. diff --git a/debian/changelog b/debian/changelog index 2278a83283..6d6f10ddf1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.1) stable; urgency=medium + + * New Synapse release 1.85.1. + + -- Synapse Packaging team Wed, 07 Jun 2023 10:51:12 +0100 + matrix-synapse-py3 (1.85.0) stable; urgency=medium * New Synapse release 1.85.0. diff --git a/pyproject.toml b/pyproject.toml index 745b58d7b5..5b6123dff6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0" +version = "1.85.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From f7c6553ebce51a46f1c78aa0a3fc6cc1effb346d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 13:02:42 +0100 Subject: Fix schema delta error in 1.85 (#15739) Some users seem to have multiple rows per user / room with a null thread ID, which we need to handle. 
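Reduced to a sketch, the merge step in the delta below takes `MAX(...)` of each column per user and room, inserts the result as a `main` row, and then drops the `NULL` rows. A hedged illustration on a simplified table (the real one also carries stream orderings and receipt columns):

```python
import sqlite3

# Illustration of the MAX(...) / GROUP BY merge used by the delta below.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE event_push_summary (user_id TEXT, room_id TEXT, notif_count INT, thread_id TEXT)"
)
conn.executemany(
    "INSERT INTO event_push_summary VALUES (?, ?, ?, ?)",
    [
        ("@a:hs", "!room", 2, None),  # duplicate NULL-thread rows
        ("@a:hs", "!room", 5, None),  # for the same user and room
    ],
)
conn.execute(
    """
    INSERT INTO event_push_summary (user_id, room_id, notif_count, thread_id)
        SELECT user_id, room_id, MAX(notif_count), 'main'
        FROM event_push_summary
        WHERE thread_id IS NULL
        GROUP BY user_id, room_id
    """
)
conn.execute("DELETE FROM event_push_summary WHERE thread_id IS NULL")
print(conn.execute("SELECT * FROM event_push_summary").fetchall())
# [('@a:hs', '!room', 5, 'main')]
```

Taking `MAX` may overcount for such rooms, but as the comment in the delta notes, the summary is corrected the next time the user reads the room.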
---
 changelog.d/15739.bugfix                               |  1 +
 .../main/delta/77/05thread_notifications_backfill.sql  | 16 ++++++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/15739.bugfix

diff --git a/changelog.d/15739.bugfix b/changelog.d/15739.bugfix
new file mode 100644
index 0000000000..7129ab0782
--- /dev/null
+++ b/changelog.d/15739.bugfix
@@ -0,0 +1 @@
+Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0.
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
index b09aa817ae..a5da7a17a0 100644
--- a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -23,13 +23,25 @@ UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL
 UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
 
 -- Empirically we can end up with entries in the push summary table with both a
--- `NULL` and `main` thread ID, which causes the update below to fail. We fudge
+-- `NULL` and `main` thread ID, which causes the insert below to fail. We fudge
 -- this by deleting any `NULL` rows that have a corresponding `main`.
 DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS (
     SELECT 1 FROM event_push_summary AS b
     WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id
 );
-UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL;
+-- Copy the NULL threads to have a 'main' thread ID.
+--
+-- Note: Some people seem to have duplicate rows with a `NULL` thread ID, in
+-- which case we just fudge it by using MAX of the values. The counts *may* be
+-- wrong for such rooms, but a) it's an edge case, and b) they'll be fixed when
+-- the user reads the room.
+INSERT INTO event_push_summary (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id)
+    SELECT user_id, room_id, MAX(notif_count), MAX(stream_ordering), MAX(unread_count), MAX(last_receipt_stream_ordering), 'main'
+    FROM event_push_summary
+    WHERE thread_id IS NULL
+    GROUP BY user_id, room_id, thread_id;
+
+DELETE FROM event_push_summary AS a WHERE thread_id IS NULL;
 
 -- Drop the background updates to calculate the indexes used to find null thread_ids.
 DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null';
-- cgit 1.5.1


From 28423977be8637bab096ed32085f06e715abe51b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 7 Jun 2023 13:04:20 +0100
Subject: Update changelog

---
 CHANGES.md               | 2 +-
 changelog.d/15739.bugfix | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)
 delete mode 100644 changelog.d/15739.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 81bf3cc110..a0f9235cac 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,7 +4,7 @@ Synapse 1.85.1 (2023-06-07)
 Bugfixes
 --------
 
-- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738))
+- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0.
([\#15738](https://github.com/matrix-org/synapse/issues/15738), [\#15739](https://github.com/matrix-org/synapse/issues/15739)) Synapse 1.85.0 (2023-06-06) diff --git a/changelog.d/15739.bugfix b/changelog.d/15739.bugfix deleted file mode 100644 index 7129ab0782..0000000000 --- a/changelog.d/15739.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. -- cgit 1.5.1 From 6cd6a2ae59e718b0695774e7348097af2c27d973 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 13:07:40 +0100 Subject: Update changelog --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index a0f9235cac..5babc22f2a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.85.1 (2023-06-07) =========================== +Note: this release only fixes a bug that stopped some deployments from upgrading to v1.85.0. There is no need to upgrade to v1.85.1 if successfully running v1.85.0. + Bugfixes -------- -- cgit 1.5.1 From 5c24d7b9ebd8dec2c76dac5118cee22a1bb1032a Mon Sep 17 00:00:00 2001 From: Grant McLean Date: Thu, 8 Jun 2023 03:21:25 +1200 Subject: Check required power levels earlier in createRoom handler. (#15695) * Check required power levels earlier in createRoom handler. - If a server was configured to reject the creation of rooms with E2EE enabled (by specifying an unattainably high power level for "m.room.encryption" in default_power_level_content_override), the 403 error was not being triggered until after the room was created and before the "m.room.power_levels" was sent. This allowed a user to access the partially-configured room and complete the setup of E2EE and power levels manually. - This change causes the power level overrides to be checked earlier and the request to be rejected before the user gains access to the room. - A new `_validate_room_config` method is added to contain checks that should be run before a room is created. - The new test case confirms that a user request is rejected by the new validation method. Signed-off-by: Grant McLean * Add a changelog file. * Formatting fix for black. * Remove unneeded line from test. --------- Signed-off-by: Grant McLean --- changelog.d/15695.bugfix | 1 + synapse/handlers/room.py | 76 +++++++++++++++++++++++++++++++++-------- tests/rest/client/test_rooms.py | 37 ++++++++++++++++++++ 3 files changed, 100 insertions(+), 14 deletions(-) create mode 100644 changelog.d/15695.bugfix diff --git a/changelog.d/15695.bugfix b/changelog.d/15695.bugfix new file mode 100644 index 0000000000..99bf1fe05e --- /dev/null +++ b/changelog.d/15695.bugfix @@ -0,0 +1 @@ +Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. 
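The `room.py` changes below boil down to one comparison: if the configured override demands a higher power level to send `m.room.encryption` than the level required to change `m.room.power_levels`, the room creator could never finish setting up E2EE, so the request is rejected up front. A standalone sketch of that predicate (the helper name is illustrative, not Synapse's API):

```python
from typing import Any, Dict


def encryption_is_unattainable(override: Dict[str, Any]) -> bool:
    # Mirror of the check added below: reject when sending m.room.encryption
    # requires more power than the room creator/admin will hold.
    event_levels = override.get("events", {})
    room_admin_level = event_levels.get("m.room.power_levels", 100)
    encryption_level = event_levels.get("m.room.encryption", 100)
    return encryption_level > room_admin_level


# With the override from the new test case, encryption can never be enabled:
private_chat_override = {
    "events": {"m.room.power_levels": 100, "m.room.encryption": 999}
}
assert encryption_is_unattainable(private_chat_override)
```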
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index cb957f2033..bf907b7881 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -872,6 +872,8 @@ class RoomCreationHandler: visibility = config.get("visibility", "private") is_public = visibility == "public" + self._validate_room_config(config, visibility) + room_id = await self._generate_and_create_room_id( creator_id=user_id, is_public=is_public, @@ -1111,20 +1113,7 @@ class RoomCreationHandler: return new_event, new_unpersisted_context - visibility = room_config.get("visibility", "private") - preset_config = room_config.get( - "preset", - RoomCreationPreset.PRIVATE_CHAT - if visibility == "private" - else RoomCreationPreset.PUBLIC_CHAT, - ) - - try: - config = self._presets_dict[preset_config] - except KeyError: - raise SynapseError( - 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON - ) + preset_config, config = self._room_preset_config(room_config) # MSC2175 removes the creator field from the create event. if not room_version.msc2175_implicit_room_creator: @@ -1306,6 +1295,65 @@ class RoomCreationHandler: assert last_event.internal_metadata.stream_ordering is not None return last_event.internal_metadata.stream_ordering, last_event.event_id, depth + def _validate_room_config( + self, + config: JsonDict, + visibility: str, + ) -> None: + """Checks configuration parameters for a /createRoom request. + + If validation detects invalid parameters an exception may be raised to + cause room creation to be aborted and an error response to be returned + to the client. + + Args: + config: A dict of configuration options. Originally from the body of + the /createRoom request + visibility: One of "public" or "private" + """ + + # Validate the requested preset, raise a 400 error if not valid + preset_name, preset_config = self._room_preset_config(config) + + # If the user is trying to create an encrypted room and this is forbidden + # by the configured default_power_level_content_override, then reject the + # request before the room is created. + raw_initial_state = config.get("initial_state", []) + room_encryption_event = any( + s.get("type", "") == EventTypes.RoomEncryption for s in raw_initial_state + ) + + if preset_config["encrypted"] or room_encryption_event: + if self._default_power_level_content_override: + override = self._default_power_level_content_override.get(preset_name) + if override is not None: + event_levels = override.get("events", {}) + room_admin_level = event_levels.get(EventTypes.PowerLevels, 100) + encryption_level = event_levels.get(EventTypes.RoomEncryption, 100) + if encryption_level > room_admin_level: + raise SynapseError( + 403, + f"You cannot create an encrypted room. user_level ({room_admin_level}) < send_level ({encryption_level})", + ) + + def _room_preset_config(self, room_config: JsonDict) -> Tuple[str, dict]: + # The spec says rooms should default to private visibility if + # `visibility` is not specified. + visibility = room_config.get("visibility", "private") + preset_name = room_config.get( + "preset", + RoomCreationPreset.PRIVATE_CHAT + if visibility == "private" + else RoomCreationPreset.PUBLIC_CHAT, + ) + try: + preset_config = self._presets_dict[preset_name] + except KeyError: + raise SynapseError( + 400, f"'{preset_name}' is not a valid preset", errcode=Codes.BAD_JSON + ) + return preset_name, preset_config + def _generate_room_id(self) -> str: """Generates a random room ID. 
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4d39c89f6f..f1b4e1ad2f 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -1941,6 +1941,43 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel.json_body["error"], ) + @unittest.override_config( + { + "default_power_level_content_override": { + "private_chat": { + "events": { + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + "m.room.encryption": 999, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100, + "m.room.server_acl": 100, + "m.room.tombstone": 100, + }, + "events_default": 0, + }, + } + }, + ) + def test_config_override_blocks_encrypted_room(self) -> None: + # Given the server has config for private_chats, + + # When I attempt to create an encrypted private_chat room + channel = self.make_request( + "POST", + "/createRoom", + '{"creation_content": {"m.federate": false},"name": "Secret Private Room","preset": "private_chat","initial_state": [{"type": "m.room.encryption","state_key": "","content": {"algorithm": "m.megolm.v1.aes-sha2"}}]}', + ) + + # Then I am not allowed because the required power level is unattainable + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) + self.assertEqual( + "You cannot create an encrypted room. " + + "user_level (100) < send_level (999)", + channel.json_body["error"], + ) + class RoomInitialSyncTestCase(RoomBase): """Tests /rooms/$room_id/initialSync.""" -- cgit 1.5.1 From 195b6a298d509518bf16d5a421d706ecb2ccdce6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Jun 2023 11:45:16 -0500 Subject: Remove redundant `room_memberships` join to find participating servers in a room (#15732) Spawning from https://github.com/matrix-org/synapse/pull/15731 --- changelog.d/15732.doc | 1 + docs/usage/administration/admin_faq.md | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15732.doc diff --git a/changelog.d/15732.doc b/changelog.d/15732.doc new file mode 100644 index 0000000000..b0e8639df7 --- /dev/null +++ b/changelog.d/15732.doc @@ -0,0 +1 @@ +Simplify query to find participating servers in a room. diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 28c3dd53a5..5c9ee7d0aa 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -27,9 +27,8 @@ What servers are currently participating in this room? Run this sql query on your db: ```sql SELECT DISTINCT split_part(state_key, ':', 2) - FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (room_id, event_id) - WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join'; +FROM current_state_events +WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join'; ``` What users are registered on my server? 
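One caveat worth knowing about the query above: PostgreSQL's `split_part(state_key, ':', 2)` returns only the second colon-separated field, so a server name that carries an explicit port (for example `@bob:example.com:8448`) comes back truncated to `example.com`. Splitting on the first colon only, as in this small Python equivalent, preserves the port:

```python
def server_name_from_user_id(user_id: str) -> str:
    # Everything after the *first* colon is the server name, which may
    # itself contain a colon-separated port.
    return user_id.split(":", 1)[1]


assert server_name_from_user_id("@alice:example.com") == "example.com"
assert server_name_from_user_id("@bob:example.com:8448") == "example.com:8448"
```

For typical deployments, where ports rarely appear in user IDs, the FAQ's query is fine; this is just a caveat to keep in mind.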
-- cgit 1.5.1 From e536f02f68135a8494f80ded75d1a53b98cbcb8d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Jun 2023 11:47:01 -0500 Subject: Remove superfluous `room_memberships` join from background update (#15733) Spawning from https://github.com/matrix-org/synapse/pull/15731 --- changelog.d/15733.misc | 1 + synapse/storage/databases/main/roommember.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/15733.misc diff --git a/changelog.d/15733.misc b/changelog.d/15733.misc new file mode 100644 index 0000000000..3ae7be3c27 --- /dev/null +++ b/changelog.d/15733.misc @@ -0,0 +1 @@ +Remove superfluous `room_memberships` join from background update. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 1b8ec67f54..582875c91a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1460,7 +1460,6 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): SELECT stream_ordering, event_id, events.room_id, event_json.json FROM events INNER JOIN event_json USING (event_id) - INNER JOIN room_memberships USING (event_id) WHERE ? <= stream_ordering AND stream_ordering < ? AND type = 'm.room.member' ORDER BY stream_ordering DESC -- cgit 1.5.1 From d162aecaac52fb467822e319e4c3c5b216c33ca9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 7 Jun 2023 18:12:23 +0100 Subject: Quick & dirty metric for background update status (#15740) * Quick & dirty metric for background update status * Changelog * Remove debug Co-authored-by: Mathieu Velten * Actually write to _aborted --------- Co-authored-by: Mathieu Velten --- changelog.d/15740.feature | 1 + synapse/metrics/__init__.py | 2 ++ synapse/storage/background_updates.py | 30 ++++++++++++++++++++++++++++++ synapse/storage/database.py | 8 +++++++- 4 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15740.feature diff --git a/changelog.d/15740.feature b/changelog.d/15740.feature new file mode 100644 index 0000000000..fed342ea55 --- /dev/null +++ b/changelog.d/15740.feature @@ -0,0 +1 @@ +Expose a metric reporting the database background update status. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 8ce5887229..39fc629937 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -77,6 +77,8 @@ RegistryProxy = cast(CollectorRegistry, _RegistryProxy) @attr.s(slots=True, hash=True, auto_attribs=True) class LaterGauge(Collector): + """A Gauge which periodically calls a user-provided callback to produce metrics.""" + name: str desc: str labels: Optional[Sequence[str]] = attr.ib(hash=False) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index ca085ef800..edc97a9d61 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from enum import IntEnum from types import TracebackType from typing import ( TYPE_CHECKING, @@ -136,6 +137,15 @@ class BackgroundUpdatePerformance: return float(self.total_item_count) / float(self.total_duration_ms) +class UpdaterStatus(IntEnum): + # Use negative values for error conditions. + ABORTED = -1 + DISABLED = 0 + NOT_STARTED = 1 + RUNNING_UPDATE = 2 + COMPLETE = 3 + + class BackgroundUpdater: """Background updates are updates to the database that run in the background. 
Each update processes a batch of data at once. We attempt to @@ -158,11 +168,16 @@ class BackgroundUpdater: self._background_update_performance: Dict[str, BackgroundUpdatePerformance] = {} self._background_update_handlers: Dict[str, _BackgroundUpdateHandler] = {} + # TODO: all these bool flags make me feel icky---can we combine into a status + # enum? self._all_done = False # Whether we're currently running updates self._running = False + # Marker to be set if we abort and halt all background updates. + self._aborted = False + # Whether background updates are enabled. This allows us to # enable/disable background updates via the admin API. self.enabled = True @@ -175,6 +190,20 @@ class BackgroundUpdater: self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms self.sleep_enabled = hs.config.background_updates.sleep_enabled + def get_status(self) -> UpdaterStatus: + """An integer summarising the updater status. Used as a metric.""" + if self._aborted: + return UpdaterStatus.ABORTED + # TODO: a status for "have seen at least one failure, but haven't aborted yet". + if not self.enabled: + return UpdaterStatus.DISABLED + + if self._all_done: + return UpdaterStatus.COMPLETE + if self._running: + return UpdaterStatus.RUNNING_UPDATE + return UpdaterStatus.NOT_STARTED + def register_update_controller_callbacks( self, on_update: ON_UPDATE_CALLBACK, @@ -296,6 +325,7 @@ class BackgroundUpdater: except Exception: back_to_back_failures += 1 if back_to_back_failures >= 5: + self._aborted = True raise RuntimeError( "5 back-to-back background update failures; aborting." ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index bdaa508dbe..10fa6c4802 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -54,7 +54,7 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) -from synapse.metrics import register_threadpool +from synapse.metrics import LaterGauge, register_threadpool from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine @@ -547,6 +547,12 @@ class DatabasePool: self._db_pool = make_pool(hs.get_reactor(), database_config, engine) self.updates = BackgroundUpdater(hs, self) + LaterGauge( + "synapse_background_update_status", + "Background update status", + [], + self.updates.get_status, + ) self._previous_txn_total_time = 0.0 self._current_txn_total_time = 0.0 -- cgit 1.5.1 From 733342ad3ef271a2c5bd4ba442a15fa3be3dab30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:03:48 +0100 Subject: Fix using TLS for replication (#15746) Fixes #15744. --- changelog.d/15746.bugfix | 1 + synapse/http/replicationagent.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15746.bugfix diff --git a/changelog.d/15746.bugfix b/changelog.d/15746.bugfix new file mode 100644 index 0000000000..8d3e22f2e5 --- /dev/null +++ b/changelog.d/15746.bugfix @@ -0,0 +1 @@ +Fix regression where using TLS for replication did not work. Introduced in v1.85.0. 
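The one-line diff below works because Twisted's `IPolicyForHTTPS.creatorForNetloc` receives the hostname as `bytes`, while Synapse's instance map stores it as `str`. A minimal sketch of a compatible policy method, assuming an ASCII hostname (this is not Synapse's actual policy class):

```python
from twisted.internet.ssl import optionsForClientTLS


def creator_for_netloc(hostname: bytes, port: int):
    # The agent machinery hands over the hostname as bytes, while
    # optionsForClientTLS expects text; hence the call site in the diff
    # below has to encode its str host (or a policy like this must decode).
    return optionsForClientTLS(hostname.decode("ascii"))


creator_for_netloc(b"replication.example.com", 443)
```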
diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index 800f21873d..d6ba6f0e57 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -76,7 +76,7 @@ class ReplicationEndpointFactory: endpoint = wrapClientTLS( # The 'port' argument below isn't actually used by the function self.context_factory.creatorForNetloc( - self.instance_map[worker_name].host, + self.instance_map[worker_name].host.encode("utf-8"), self.instance_map[worker_name].port, ), endpoint, -- cgit 1.5.1 From a4921b23703776c9399433906b57c90fadb55bb6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:04:26 +0100 Subject: 1.85.2 --- CHANGES.md | 9 +++++++++ changelog.d/15746.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15746.bugfix diff --git a/CHANGES.md b/CHANGES.md index 5babc22f2a..f3eb0182f6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.85.2 (2023-06-08) +=========================== + +Bugfixes +-------- + +- Fix regression where using TLS for replication did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) + + Synapse 1.85.1 (2023-06-07) =========================== diff --git a/changelog.d/15746.bugfix b/changelog.d/15746.bugfix deleted file mode 100644 index 8d3e22f2e5..0000000000 --- a/changelog.d/15746.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression where using TLS for replication did not work. Introduced in v1.85.0. diff --git a/debian/changelog b/debian/changelog index 6d6f10ddf1..a7503ea60a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.2) stable; urgency=medium + + * New Synapse release 1.85.2. + + -- Synapse Packaging team Thu, 08 Jun 2023 13:04:18 +0100 + matrix-synapse-py3 (1.85.1) stable; urgency=medium * New Synapse release 1.85.1. diff --git a/pyproject.toml b/pyproject.toml index 5b6123dff6..02c9255f6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.1" +version = "1.85.2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" -- cgit 1.5.1 From c485ed1c5a4c62ae555531cfd001a5e5f8bc2e44 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:14:40 +0100 Subject: Clear event caches when we purge history (#15609) This should help a little with #13476 --------- Co-authored-by: Patrick Cloke --- changelog.d/15609.bugfix | 1 + synapse/storage/_base.py | 31 +++++ synapse/storage/databases/main/cache.py | 134 +++++++++++++++++++++ synapse/storage/databases/main/events_worker.py | 9 ++ synapse/storage/databases/main/purge_events.py | 8 +- synapse/util/caches/lrucache.py | 2 +- tests/handlers/test_sync.py | 2 +- tests/rest/client/test_read_marker.py | 3 - tests/storage/databases/main/test_events_worker.py | 8 +- 9 files changed, 184 insertions(+), 14 deletions(-) create mode 100644 changelog.d/15609.bugfix diff --git a/changelog.d/15609.bugfix b/changelog.d/15609.bugfix new file mode 100644 index 0000000000..b5a990cfec --- /dev/null +++ b/changelog.d/15609.bugfix @@ -0,0 +1 @@ +Correctly clear caches when we delete a room. 
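The diffs below lean heavily on `_attempt_to_invalidate_cache`, whose contract is: a key tuple targets one entry, `None` means drop everything, and a missing cache is ignored (not every cache exists on every worker). A dictionary-backed sketch of that contract, with hypothetical cache contents:

```python
from typing import Any, Dict, Optional, Tuple

# Hypothetical cache contents, keyed the way Synapse keys its cached functions.
caches: Dict[str, Dict[Tuple[Any, ...], Any]] = {
    "get_aliases_for_room": {("!room:hs",): ["#alias:hs"]},
    "get_rooms_for_user": {("@alice:hs",): {"!room:hs"}},
}


def attempt_to_invalidate_cache(name: str, key: Optional[Tuple[Any, ...]]) -> bool:
    cache = caches.get(name)
    if cache is None:
        # Cache doesn't exist (e.g. not on this worker); quietly do nothing.
        return False
    if key is None:
        # `None` is the "invalidate everything" signal.
        cache.clear()
    else:
        cache.pop(key, None)
    return True


attempt_to_invalidate_cache("get_aliases_for_room", ("!room:hs",))  # one entry
attempt_to_invalidate_cache("get_rooms_for_user", None)  # whole cache
```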
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 481fec72fe..fe4a763411 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -86,9 +86,14 @@ class SQLBaseStore(metaclass=ABCMeta):
             room_id: Room where state changed
             members_changed: The user_ids of members that have changed
         """
+
+        # XXX: If you add something to this function make sure you add it to
+        # `_invalidate_state_caches_all` as well.
+
         # If there were any membership changes, purge the appropriate caches.
         for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
+            self._attempt_to_invalidate_cache("is_host_invited", (room_id, host))
         if members_changed:
             self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
             self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
@@ -117,6 +122,32 @@ class SQLBaseStore(metaclass=ABCMeta):
             self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
 
+    def _invalidate_state_caches_all(self, room_id: str) -> None:
+        """Invalidates caches that are based on the current state, but does
+        not stream invalidations down replication.
+
+        Same as `_invalidate_state_caches`, except that it works when we don't
+        know which memberships have changed.
+
+        Args:
+            room_id: Room where state changed
+        """
+        self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+        self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("is_host_invited", None)
+        self._attempt_to_invalidate_cache("is_host_joined", None)
+        self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
+        self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
+        self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
+        self._attempt_to_invalidate_cache(
+            "get_rooms_for_user_with_stream_ordering", None
+        )
+        self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+        self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
+
     def _attempt_to_invalidate_cache(
         self, cache_name: str, key: Optional[Collection[Any]]
     ) -> bool:
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 46fa0a73f9..6e1c7d681f 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -46,6 +46,12 @@ logger = logging.getLogger(__name__)
 # based on the current state when notifying workers over replication.
CURRENT_STATE_CACHE_NAME = "cs_cache_fake" +# As above, but for invalidating event caches on history deletion +PURGE_HISTORY_CACHE_NAME = "ph_cache_fake" + +# As above, but for invalidating room caches on room deletion +DELETE_ROOM_CACHE_NAME = "dr_cache_fake" + class CacheInvalidationWorkerStore(SQLBaseStore): def __init__( @@ -175,6 +181,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore): room_id = row.keys[0] members_changed = set(row.keys[1:]) self._invalidate_state_caches(room_id, members_changed) + elif row.cache_func == PURGE_HISTORY_CACHE_NAME: + if row.keys is None: + raise Exception( + "Can't send an 'invalidate all' for 'purge history' cache" + ) + + room_id = row.keys[0] + self._invalidate_caches_for_room_events(room_id) + elif row.cache_func == DELETE_ROOM_CACHE_NAME: + if row.keys is None: + raise Exception( + "Can't send an 'invalidate all' for 'delete room' cache" + ) + + room_id = row.keys[0] + self._invalidate_caches_for_room_events(room_id) + self._invalidate_caches_for_room(room_id) else: self._attempt_to_invalidate_cache(row.cache_func, row.keys) @@ -226,6 +249,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): relates_to: Optional[str], backfilled: bool, ) -> None: + # XXX: If you add something to this function make sure you add it to + # `_invalidate_caches_for_room_events` as well. + # This invalidates any local in-memory cached event objects, the original # process triggering the invalidation is responsible for clearing any external # cached objects. @@ -271,6 +297,106 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,)) self._attempt_to_invalidate_cache("get_threads", (room_id,)) + def _invalidate_caches_for_room_events_and_stream( + self, txn: LoggingTransaction, room_id: str + ) -> None: + """Invalidate caches associated with events in a room, and stream to + replication. + + Used when we delete events a room, but don't know which events we've + deleted. + """ + + self._send_invalidation_to_replication(txn, PURGE_HISTORY_CACHE_NAME, [room_id]) + txn.call_after(self._invalidate_caches_for_room_events, room_id) + + def _invalidate_caches_for_room_events(self, room_id: str) -> None: + """Invalidate caches associated with events in a room, and stream to + replication. + + Used when we delete events in a room, but don't know which events we've + deleted. 
+ """ + + self._invalidate_local_get_event_cache_all() # type: ignore[attr-defined] + + self._attempt_to_invalidate_cache("have_seen_event", (room_id,)) + self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) + self._attempt_to_invalidate_cache( + "get_unread_event_push_actions_by_room_for_user", (room_id,) + ) + + self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) + self._attempt_to_invalidate_cache("get_relations_for_event", None) + self._attempt_to_invalidate_cache("get_applicable_edit", None) + self._attempt_to_invalidate_cache("get_thread_id", None) + self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None) + self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) + self._attempt_to_invalidate_cache( + "get_rooms_for_user_with_stream_ordering", None + ) + self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache("get_references_for_event", None) + self._attempt_to_invalidate_cache("get_thread_summary", None) + self._attempt_to_invalidate_cache("get_thread_participated", None) + self._attempt_to_invalidate_cache("get_threads", (room_id,)) + + self._attempt_to_invalidate_cache("_get_state_group_for_event", None) + + self._attempt_to_invalidate_cache("get_event_ordering", None) + self._attempt_to_invalidate_cache("is_partial_state_event", None) + self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None) + + def _invalidate_caches_for_room_and_stream( + self, txn: LoggingTransaction, room_id: str + ) -> None: + """Invalidate caches associated with rooms, and stream to replication. + + Used when we delete rooms. + """ + + self._send_invalidation_to_replication(txn, DELETE_ROOM_CACHE_NAME, [room_id]) + txn.call_after(self._invalidate_caches_for_room, room_id) + + def _invalidate_caches_for_room(self, room_id: str) -> None: + """Invalidate caches associated with rooms. + + Used when we delete rooms. + """ + + # If we've deleted the room then we also need to purge all event caches. + self._invalidate_caches_for_room_events(room_id) + + self._attempt_to_invalidate_cache("get_account_data_for_room", None) + self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None) + self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,)) + self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) + self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None) + self._attempt_to_invalidate_cache( + "get_unread_event_push_actions_by_room_for_user", (room_id,) + ) + self._attempt_to_invalidate_cache( + "_get_linearized_receipts_for_room", (room_id,) + ) + self._attempt_to_invalidate_cache("is_room_blocked", (room_id,)) + self._attempt_to_invalidate_cache("get_retention_policy_for_room", (room_id,)) + self._attempt_to_invalidate_cache( + "_get_partial_state_servers_at_join", (room_id,) + ) + self._attempt_to_invalidate_cache("is_partial_state_room", (room_id,)) + self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) + self._attempt_to_invalidate_cache( + "get_current_hosts_in_room_ordered", (room_id,) + ) + self._attempt_to_invalidate_cache("did_forget", None) + self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) + self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) + self._attempt_to_invalidate_cache("get_room_version_id", (room_id,)) + + # And delete state caches. 
+ + self._invalidate_state_caches_all(room_id) + async def invalidate_cache_and_stream( self, cache_name: str, keys: Tuple[Any, ...] ) -> None: @@ -377,6 +503,14 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "Can't stream invalidate all with magic current state cache" ) + if cache_name == PURGE_HISTORY_CACHE_NAME and keys is None: + raise Exception( + "Can't stream invalidate all with magic purge history cache" + ) + + if cache_name == DELETE_ROOM_CACHE_NAME and keys is None: + raise Exception("Can't stream invalidate all with magic delete room cache") + if isinstance(self.database_engine, PostgresEngine): assert self._cache_id_gen is not None diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index a39bc90974..d93ffc4efa 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -903,6 +903,15 @@ class EventsWorkerStore(SQLBaseStore): self._event_ref.pop(event_id, None) self._current_event_fetches.pop(event_id, None) + def _invalidate_local_get_event_cache_all(self) -> None: + """Clears the in-memory get event caches. + + Used when we purge room history. + """ + self._get_event_cache.clear() + self._event_ref.clear() + self._current_event_fetches.clear() + async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index efbd3e75d9..9773c1fcd2 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -308,6 +308,8 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): logger.info("[purge] done") + self._invalidate_caches_for_room_events_and_stream(txn, room_id) + return referenced_state_groups async def purge_room(self, room_id: str) -> List[int]: @@ -485,10 +487,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) - # TODO: we could probably usefully do a bunch more cache invalidation here - - # XXX: as with purge_history, this is racy, but no worse than other races - # that already exist. - self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) + self._invalidate_caches_for_room_and_stream(txn, room_id) return state_groups diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index ed0da17227..6137c85e10 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -862,5 +862,5 @@ class AsyncLruCache(Generic[KT, VT]): async def contains(self, key: KT) -> bool: return self._lru_cache.contains(key) - async def clear(self) -> None: + def clear(self) -> None: self._lru_cache.clear() diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 0d9a3de92a..9f035a02dc 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -163,7 +163,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. 
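The test changes above follow from the `lrucache.py` hunk: `AsyncLruCache.clear()` is now a plain method, so call sites drop the `get_success(...)` wrapper. A cut-down stand-in showing the calling convention (lookups stay async because the real wrapper may consult an external cache, while clearing the local map is purely in-memory):

```python
import asyncio
from typing import Any, Dict, Optional


class AsyncLruCacheSketch:
    """Pared-down stand-in for Synapse's `AsyncLruCache` wrapper."""

    def __init__(self) -> None:
        self._data: Dict[str, Any] = {}

    async def get(self, key: str) -> Optional[Any]:
        # Async, because the real wrapper may consult an external cache.
        return self._data.get(key)

    def clear(self) -> None:
        # Purely in-memory, so no coroutine (and no `get_success`) needed.
        self._data.clear()


cache = AsyncLruCacheSketch()
cache.clear()  # called directly, as in the updated tests above
print(asyncio.run(cache.get("$event_id")))  # None
```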
diff --git a/tests/rest/client/test_read_marker.py b/tests/rest/client/test_read_marker.py index 0eedcdb476..5cdd5694a0 100644 --- a/tests/rest/client/test_read_marker.py +++ b/tests/rest/client/test_read_marker.py @@ -131,9 +131,6 @@ class ReadMarkerTestCase(unittest.HomeserverTestCase): event = self.get_success(self.store.get_event(event_id_1, allow_none=True)) assert event is None - # TODO See https://github.com/matrix-org/synapse/issues/13476 - self.store.get_event_ordering.invalidate_all() - # Test moving the read marker to a newer event event_id_2 = send_message() channel = self.make_request( diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 9606ecc43b..788500e38f 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -188,7 +188,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() def test_simple(self) -> None: """Test that we cache events that we pull from the DB.""" @@ -205,7 +205,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): """ # Reset the event cache - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() with LoggingContext("test") as ctx: # We keep hold of the event event though we never use it. @@ -215,7 +215,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) # Reset the event cache - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() with LoggingContext("test") as ctx: self.get_success(self.store.get_event(self.event_id)) @@ -390,7 +390,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() @contextmanager def blocking_get_event_calls( -- cgit 1.5.1 From ac3a70a7dd4070bf3953b8913f7c316d701db588 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:15:56 +0100 Subject: Fix up changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f3eb0182f6..893ceccaea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.85.2 (2023-06-08) Bugfixes -------- -- Fix regression where using TLS for replication did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) +- Fix regression where using TLS for HTTP replication between workers did not work. Introduced in v1.85.0. 
([\#15746](https://github.com/matrix-org/synapse/issues/15746))
 
 
 Synapse 1.85.1 (2023-06-07)
-- cgit 1.5.1


From d84e66144dc12dacf71c987a2ba802dd59c0b68e Mon Sep 17 00:00:00 2001
From: Shay
Date: Fri, 9 Jun 2023 00:00:46 -0700
Subject: Allow for the configuration of max request retries and min/max retry delays in the matrix federation client (#12504)

Co-authored-by: Mathieu Velten
Co-authored-by: Erik Johnston
---
 changelog.d/12504.misc                           |  1 +
 docs/usage/configuration/config_documentation.md | 26 ++++++++++++++++++++++++
 synapse/config/federation.py                     | 10 +++++++++
 synapse/http/matrixfederationclient.py           | 21 +++++++++++--------
 tests/http/test_matrixfederationclient.py        | 20 +++++++++++++++++-
 5 files changed, 68 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/12504.misc

diff --git a/changelog.d/12504.misc b/changelog.d/12504.misc
new file mode 100644
index 0000000000..0bebaa213d
--- /dev/null
+++ b/changelog.d/12504.misc
@@ -0,0 +1 @@
+Allow for the configuration of max request retries and min/max retry delays in the matrix federation client.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 0cf6e075ff..8426de0417 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1196,6 +1196,32 @@ Example configuration:
 allow_device_name_lookup_over_federation: true
 ```
 ---
+### `federation`
+
+The federation section defines some sub-options related to federation.
+
+The following options configure the timeout and retry logic for a single request,
+independently of the others.
+The short retry algorithm is used when something or someone is waiting for the
+request to complete, while the long retry algorithm is used for requests that
+happen in the background, like sending a federation transaction.
+
+* `client_timeout`: timeout for federation requests, in seconds. Defaults to 60s.
+* `max_short_retry_delay`: maximum delay to be used for the short retry algorithm, in seconds. Defaults to 2s.
+* `max_long_retry_delay`: maximum delay to be used for the long retry algorithm, in seconds. Defaults to 60s.
+* `max_short_retries`: maximum number of retries for the short retry algorithm. Defaults to 3 attempts.
+* `max_long_retries`: maximum number of retries for the long retry algorithm. Defaults to 10 attempts.
+
+Example configuration:
+```yaml
+federation:
+  client_timeout: 180
+  max_short_retry_delay: 7
+  max_long_retry_delay: 100
+  max_short_retries: 5
+  max_long_retries: 20
+```
+---
 ## Caching
 
 Options related to caching.
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
index 336fca578a..d21f7fd02a 100644
--- a/synapse/config/federation.py
+++ b/synapse/config/federation.py
@@ -22,6 +22,8 @@ class FederationConfig(Config):
     section = "federation"
 
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
+        federation_config = config.setdefault("federation", {})
+
         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist: Optional[dict] = None
         federation_domain_whitelist = config.get("federation_domain_whitelist", None)
@@ -49,5 +51,13 @@ class FederationConfig(Config):
             "allow_device_name_lookup_over_federation", False
         )
 
+        # Allow for the configuration of timeout, max request retries
+        # and min/max retry delays in the matrix federation client.
+ self.client_timeout = federation_config.get("client_timeout", 60) + self.max_long_retry_delay = federation_config.get("max_long_retry_delay", 60) + self.max_short_retry_delay = federation_config.get("max_short_retry_delay", 2) + self.max_long_retries = federation_config.get("max_long_retries", 10) + self.max_short_retries = federation_config.get("max_short_retries", 3) + _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index abb5ae5815..ed36825b67 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -95,8 +95,6 @@ incoming_responses_counter = Counter( ) -MAX_LONG_RETRIES = 10 -MAX_SHORT_RETRIES = 3 MAXINT = sys.maxsize @@ -406,7 +404,12 @@ class MatrixFederationHttpClient: self.clock = hs.get_clock() self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") - self.default_timeout = 60 + self.default_timeout = hs.config.federation.client_timeout + + self.max_long_retry_delay = hs.config.federation.max_long_retry_delay + self.max_short_retry_delay = hs.config.federation.max_short_retry_delay + self.max_long_retries = hs.config.federation.max_long_retries + self.max_short_retries = hs.config.federation.max_short_retries self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) @@ -583,9 +586,9 @@ class MatrixFederationHttpClient: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: - retries_left = MAX_LONG_RETRIES + retries_left = self.max_long_retries else: - retries_left = MAX_SHORT_RETRIES + retries_left = self.max_short_retries url_bytes = request.uri url_str = url_bytes.decode("ascii") @@ -730,12 +733,12 @@ class MatrixFederationHttpClient: if retries_left and not timeout: if long_retries: - delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) - delay = min(delay, 60) + delay = 4 ** (self.max_long_retries + 1 - retries_left) + delay = min(delay, self.max_long_retry_delay) delay *= random.uniform(0.8, 1.4) else: - delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) - delay = min(delay, 2) + delay = 0.5 * 2 ** (self.max_short_retries - retries_left) + delay = min(delay, self.max_short_retry_delay) delay *= random.uniform(0.8, 1.4) logger.debug( diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 0dfc03ce50..8565f8ac64 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -40,7 +40,7 @@ from synapse.server import HomeServer from synapse.util import Clock from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config def check_logcontext(context: LoggingContextOrSentinel) -> None: @@ -640,3 +640,21 @@ class FederationClientTests(HomeserverTestCase): self.cl.build_auth_headers( b"", b"GET", b"https://example.com", destination_is=b"" ) + + @override_config( + { + "federation": { + "client_timeout": 180, + "max_long_retry_delay": 100, + "max_short_retry_delay": 7, + "max_long_retries": 20, + "max_short_retries": 5, + } + } + ) + def test_configurable_retry_and_delay_values(self) -> None: + self.assertEqual(self.cl.default_timeout, 180) + self.assertEqual(self.cl.max_long_retry_delay, 100) + self.assertEqual(self.cl.max_short_retry_delay, 7) + self.assertEqual(self.cl.max_long_retries, 20) + 
self.assertEqual(self.cl.max_short_retries, 5) -- cgit 1.5.1
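As a closing sanity check on the `long_retries` warning added earlier in this series, the worst case implied by the default values above (`max_long_retries = 10`, `max_long_retry_delay = 60`, `client_timeout = 60`) can be computed directly. A sketch reproducing the delay formula from `matrixfederationclient.py`, ignoring the 0.8-1.4x jitter and per-attempt connection overhead:

```python
# Defaults from the new federation config section above.
max_long_retries = 10
max_long_retry_delay = 60
client_timeout = 60  # worst case: every attempt runs into the full timeout

total = 0.0
retries_left = max_long_retries
while retries_left:
    total += client_timeout  # the attempt itself times out
    # Same backoff formula as MatrixFederationHttpClient, minus the jitter.
    delay = min(4 ** (max_long_retries + 1 - retries_left), max_long_retry_delay)
    total += delay
    retries_left -= 1
total += client_timeout  # the final attempt, after which we give up

print(f"worst case: ~{total / 60:.1f} minutes")
# ~19.3 minutes before jitter; with the 1.4x upper jitter bound on the delays,
# comfortably "over 20 minutes", as the docstring added in #15721 warns.
```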